/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                        \
    __attribute__((__section__(".gen_code")))   \
    __attribute__((aligned (32)))
#else
#define code_gen_section                        \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

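/* Make a host memory range readable, writable and executable; used for the
   generated-code buffer and the TCG prologue. */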
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

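/* Look up the PhysPageDesc for physical page 'index'.  When 'alloc' is
   non-zero, intermediate tables and the leaf array are allocated on demand
   (new entries start as IO_MEM_UNASSIGNED); otherwise NULL is returned for
   pages that were never registered. */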
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

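/* Each TB keeps the TBs that jump to it on a circular list threaded through
   jmp_next[]; the low two bits of every pointer on that list encode the jump
   slot (0 or 1) in the referring TB, and the value 2 marks the list head.
   Unlink jump slot 'n' of 'tb' from the list of its destination TB. */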
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

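/* Remove 'tb' from every lookup structure: the physical-PC hash table, the
   per-page TB lists, each CPU's tb_jmp_cache, and the jump lists of any TB
   that chains to it.  'page_addr' is the page being invalidated, or -1. */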
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

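/* Set bits [start, start + len) in the byte-array bitmap 'tab'; start and
   len are bit indices.  Used to mark which parts of a page are covered by
   translated code in the SMC page bitmap. */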
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

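/* Unchain jump slot 'n' of 'tb': find the TB it currently jumps to, remove
   'tb' from that TB's list of incoming jumps, patch the generated code back
   to its non-chained form, and recurse into the destination so that a whole
   chain of linked TBs is broken. */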
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

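/* Invalidate a single TLB entry if any of its read, write or code addresses
   matches 'addr'. */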
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

1798
5fafdf24 1799static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1800 unsigned long start, unsigned long length)
1801{
1802 unsigned long addr;
84b7b8e7
FB
1803 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1804 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1805 if ((addr - start) < length) {
0f459d16 1806 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1807 }
1808 }
1809}
1810
3a7d929e 1811void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1812 int dirty_flags)
1ccde1cb
FB
1813{
1814 CPUState *env;
4f2ac237 1815 unsigned long length, start1;
0a962c02
FB
1816 int i, mask, len;
1817 uint8_t *p;
1ccde1cb
FB
1818
1819 start &= TARGET_PAGE_MASK;
1820 end = TARGET_PAGE_ALIGN(end);
1821
1822 length = end - start;
1823 if (length == 0)
1824 return;
0a962c02 1825 len = length >> TARGET_PAGE_BITS;
3a7d929e 1826#ifdef USE_KQEMU
6a00d601
FB
1827 /* XXX: should not depend on cpu context */
1828 env = first_cpu;
3a7d929e 1829 if (env->kqemu_enabled) {
f23db169
FB
1830 ram_addr_t addr;
1831 addr = start;
1832 for(i = 0; i < len; i++) {
1833 kqemu_set_notdirty(env, addr);
1834 addr += TARGET_PAGE_SIZE;
1835 }
3a7d929e
FB
1836 }
1837#endif
f23db169
FB
1838 mask = ~dirty_flags;
1839 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1840 for(i = 0; i < len; i++)
1841 p[i] &= mask;
1842
1ccde1cb
FB
1843 /* we modify the TLB cache so that the dirty bit will be set again
1844 when accessing the range */
59817ccb 1845 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1846 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1847 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1848 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1849 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1850 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1851#if (NB_MMU_MODES >= 3)
1852 for(i = 0; i < CPU_TLB_SIZE; i++)
1853 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1854#if (NB_MMU_MODES == 4)
1855 for(i = 0; i < CPU_TLB_SIZE; i++)
1856 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1857#endif
1858#endif
6a00d601 1859 }
1ccde1cb
FB
1860}
1861
74576198
AL
1862int cpu_physical_memory_set_dirty_tracking(int enable)
1863{
1864 in_migration = enable;
1865 return 0;
1866}
1867
1868int cpu_physical_memory_get_dirty_tracking(void)
1869{
1870 return in_migration;
1871}
1872
2bec46dc
AL
1873void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1874{
1875 if (kvm_enabled())
1876 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1877}
1878
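/* Illustrative sketch (not part of the original file): one way a migration
   loop might drive the dirty-tracking hooks above.  The RAM size, the
   dirty_flag value and send_page() are hypothetical;
   cpu_physical_memory_get_dirty() is assumed to be the per-flag accessor
   declared in the headers next to cpu_physical_memory_is_dirty(). */
#if 0
static void example_sync_dirty_pages(ram_addr_t ram_size, int dirty_flag)
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);    /* start logging guest writes */
    cpu_physical_sync_dirty_bitmap(0, ram_size);  /* pull the accelerator bitmap in */

    for (addr = 0; addr < ram_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, dirty_flag)) { /* assumed accessor */
            send_page(addr);                                   /* hypothetical sender */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            dirty_flag);
        }
    }
}
#endif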
3a7d929e
FB
1879static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1880{
1881 ram_addr_t ram_addr;
1882
84b7b8e7 1883 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1884 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1885 tlb_entry->addend - (unsigned long)phys_ram_base;
1886 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1887 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1888 }
1889 }
1890}
1891
1892/* update the TLB according to the current state of the dirty bits */
1893void cpu_tlb_update_dirty(CPUState *env)
1894{
1895 int i;
1896 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1897 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1898 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1899 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1900#if (NB_MMU_MODES >= 3)
1901 for(i = 0; i < CPU_TLB_SIZE; i++)
1902 tlb_update_dirty(&env->tlb_table[2][i]);
1903#if (NB_MMU_MODES == 4)
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_update_dirty(&env->tlb_table[3][i]);
1906#endif
1907#endif
3a7d929e
FB
1908}
1909
0f459d16 1910static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1911{
0f459d16
PB
1912 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1913 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1914}
1915
0f459d16
PB
1916/* update the TLB corresponding to virtual page vaddr
1917 so that it is no longer dirty */
1918static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1919{
1ccde1cb
FB
1920 int i;
1921
0f459d16 1922 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1923 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1924 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1925 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1926#if (NB_MMU_MODES >= 3)
0f459d16 1927 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1928#if (NB_MMU_MODES == 4)
0f459d16 1929 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1930#endif
1931#endif
9fa3e853
FB
1932}
1933
59817ccb
FB
1934/* add a new TLB entry. At most one entry for a given virtual address
1935 is permitted. Return 0 if OK or 2 if the page could not be mapped
1936 (can only happen in non SOFTMMU mode for I/O pages or pages
1937 conflicting with the host address space). */
5fafdf24
TS
1938int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1939 target_phys_addr_t paddr, int prot,
6ebbf390 1940 int mmu_idx, int is_softmmu)
9fa3e853 1941{
92e873b9 1942 PhysPageDesc *p;
4f2ac237 1943 unsigned long pd;
9fa3e853 1944 unsigned int index;
4f2ac237 1945 target_ulong address;
0f459d16 1946 target_ulong code_address;
108c49b8 1947 target_phys_addr_t addend;
9fa3e853 1948 int ret;
84b7b8e7 1949 CPUTLBEntry *te;
a1d1bb31 1950 CPUWatchpoint *wp;
0f459d16 1951 target_phys_addr_t iotlb;
9fa3e853 1952
92e873b9 1953 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1954 if (!p) {
1955 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1956 } else {
1957 pd = p->phys_offset;
9fa3e853
FB
1958 }
1959#if defined(DEBUG_TLB)
6ebbf390
JM
1960 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1961 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1962#endif
1963
1964 ret = 0;
0f459d16
PB
1965 address = vaddr;
1966 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1967 /* IO memory case (romd handled later) */
1968 address |= TLB_MMIO;
1969 }
1970 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1971 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1972 /* Normal RAM. */
1973 iotlb = pd & TARGET_PAGE_MASK;
1974 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1975 iotlb |= IO_MEM_NOTDIRTY;
1976 else
1977 iotlb |= IO_MEM_ROM;
1978 } else {
1979        /* IO handlers are currently passed a physical address.
1980 It would be nice to pass an offset from the base address
1981 of that region. This would avoid having to special case RAM,
1982 and avoid full address decoding in every device.
1983 We can't use the high bits of pd for this because
1984 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1985 iotlb = (pd & ~TARGET_PAGE_MASK);
1986 if (p) {
8da3ff18
PB
1987 iotlb += p->region_offset;
1988 } else {
1989 iotlb += paddr;
1990 }
0f459d16
PB
1991 }
1992
1993 code_address = address;
1994 /* Make accesses to pages with watchpoints go via the
1995 watchpoint trap routines. */
c0ce998e 1996 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 1997 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
1998 iotlb = io_mem_watch + paddr;
1999 /* TODO: The memory case can be optimized by not trapping
2000 reads of pages with a write breakpoint. */
2001 address |= TLB_MMIO;
6658ffb8 2002 }
0f459d16 2003 }
d79acba4 2004
0f459d16
PB
2005 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2006 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2007 te = &env->tlb_table[mmu_idx][index];
2008 te->addend = addend - vaddr;
2009 if (prot & PAGE_READ) {
2010 te->addr_read = address;
2011 } else {
2012 te->addr_read = -1;
2013 }
5c751e99 2014
0f459d16
PB
2015 if (prot & PAGE_EXEC) {
2016 te->addr_code = code_address;
2017 } else {
2018 te->addr_code = -1;
2019 }
2020 if (prot & PAGE_WRITE) {
2021 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2022 (pd & IO_MEM_ROMD)) {
2023 /* Write access calls the I/O callback. */
2024 te->addr_write = address | TLB_MMIO;
2025 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2026 !cpu_physical_memory_is_dirty(pd)) {
2027 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2028 } else {
0f459d16 2029 te->addr_write = address;
9fa3e853 2030 }
0f459d16
PB
2031 } else {
2032 te->addr_write = -1;
9fa3e853 2033 }
9fa3e853
FB
2034 return ret;
2035}
2036
0124311e
FB
2037#else
2038
ee8b7021 2039void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2040{
2041}
2042
2e12669a 2043void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2044{
2045}
2046
5fafdf24
TS
2047int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2048 target_phys_addr_t paddr, int prot,
6ebbf390 2049 int mmu_idx, int is_softmmu)
9fa3e853
FB
2050{
2051 return 0;
2052}
0124311e 2053
9fa3e853
FB
2054/* dump memory mappings */
2055void page_dump(FILE *f)
33417e70 2056{
9fa3e853
FB
2057 unsigned long start, end;
2058 int i, j, prot, prot1;
2059 PageDesc *p;
33417e70 2060
9fa3e853
FB
2061 fprintf(f, "%-8s %-8s %-8s %s\n",
2062 "start", "end", "size", "prot");
2063 start = -1;
2064 end = -1;
2065 prot = 0;
2066 for(i = 0; i <= L1_SIZE; i++) {
2067 if (i < L1_SIZE)
2068 p = l1_map[i];
2069 else
2070 p = NULL;
2071 for(j = 0;j < L2_SIZE; j++) {
2072 if (!p)
2073 prot1 = 0;
2074 else
2075 prot1 = p[j].flags;
2076 if (prot1 != prot) {
2077 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2078 if (start != -1) {
2079 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2080 start, end, end - start,
9fa3e853
FB
2081 prot & PAGE_READ ? 'r' : '-',
2082 prot & PAGE_WRITE ? 'w' : '-',
2083 prot & PAGE_EXEC ? 'x' : '-');
2084 }
2085 if (prot1 != 0)
2086 start = end;
2087 else
2088 start = -1;
2089 prot = prot1;
2090 }
2091 if (!p)
2092 break;
2093 }
33417e70 2094 }
33417e70
FB
2095}
2096
53a5960a 2097int page_get_flags(target_ulong address)
33417e70 2098{
9fa3e853
FB
2099 PageDesc *p;
2100
2101 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2102 if (!p)
9fa3e853
FB
2103 return 0;
2104 return p->flags;
2105}
2106
2107/* modify the flags of a page and invalidate the code if
2108   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2109 depending on PAGE_WRITE */
53a5960a 2110void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2111{
2112 PageDesc *p;
53a5960a 2113 target_ulong addr;
9fa3e853 2114
c8a706fe 2115 /* mmap_lock should already be held. */
9fa3e853
FB
2116 start = start & TARGET_PAGE_MASK;
2117 end = TARGET_PAGE_ALIGN(end);
2118 if (flags & PAGE_WRITE)
2119 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2120 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2121 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2122 /* We may be called for host regions that are outside guest
2123 address space. */
2124 if (!p)
2125 return;
9fa3e853
FB
2126 /* if the write protection is set, then we invalidate the code
2127 inside */
5fafdf24 2128 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2129 (flags & PAGE_WRITE) &&
2130 p->first_tb) {
d720b93d 2131 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2132 }
2133 p->flags = flags;
2134 }
33417e70
FB
2135}
2136
3d97b40b
TS
2137int page_check_range(target_ulong start, target_ulong len, int flags)
2138{
2139 PageDesc *p;
2140 target_ulong end;
2141 target_ulong addr;
2142
55f280c9
AZ
2143 if (start + len < start)
2144 /* we've wrapped around */
2145 return -1;
2146
3d97b40b
TS
2147    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2148 start = start & TARGET_PAGE_MASK;
2149
3d97b40b
TS
2150 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2151 p = page_find(addr >> TARGET_PAGE_BITS);
2152 if( !p )
2153 return -1;
2154 if( !(p->flags & PAGE_VALID) )
2155 return -1;
2156
dae3270c 2157 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2158 return -1;
dae3270c
FB
2159 if (flags & PAGE_WRITE) {
2160 if (!(p->flags & PAGE_WRITE_ORG))
2161 return -1;
2162 /* unprotect the page if it was put read-only because it
2163 contains translated code */
2164 if (!(p->flags & PAGE_WRITE)) {
2165 if (!page_unprotect(addr, 0, NULL))
2166 return -1;
2167 }
2168 return 0;
2169 }
3d97b40b
TS
2170 }
2171 return 0;
2172}
2173
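/* Illustrative sketch (not part of the original file): how user-mode code,
   for instance an mmap() emulation, might use the page-flag helpers above.
   The guest address and length are made up; only page_set_flags() and
   page_check_range() are taken from this file. */
#if 0
static int example_track_guest_mapping(target_ulong guest_addr, target_ulong len)
{
    /* Record the new mapping as valid, readable and writable. */
    page_set_flags(guest_addr, guest_addr + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* Later, before copying data out of the guest, check that the whole
       range is still readable; this also re-unprotects pages that were made
       read-only to guard translated code. */
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;
    return 0;
}
#endif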
9fa3e853
FB
2174/* called from signal handler: invalidate the code and unprotect the
2175   page. Return TRUE if the fault was successfully handled. */
53a5960a 2176int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2177{
2178 unsigned int page_index, prot, pindex;
2179 PageDesc *p, *p1;
53a5960a 2180 target_ulong host_start, host_end, addr;
9fa3e853 2181
c8a706fe
PB
2182 /* Technically this isn't safe inside a signal handler. However we
2183 know this only ever happens in a synchronous SEGV handler, so in
2184 practice it seems to be ok. */
2185 mmap_lock();
2186
83fb7adf 2187 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2188 page_index = host_start >> TARGET_PAGE_BITS;
2189 p1 = page_find(page_index);
c8a706fe
PB
2190 if (!p1) {
2191 mmap_unlock();
9fa3e853 2192 return 0;
c8a706fe 2193 }
83fb7adf 2194 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2195 p = p1;
2196 prot = 0;
2197 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2198 prot |= p->flags;
2199 p++;
2200 }
2201 /* if the page was really writable, then we change its
2202 protection back to writable */
2203 if (prot & PAGE_WRITE_ORG) {
2204 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2205 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2206 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2207 (prot & PAGE_BITS) | PAGE_WRITE);
2208 p1[pindex].flags |= PAGE_WRITE;
2209 /* and since the content will be modified, we must invalidate
2210 the corresponding translated code. */
d720b93d 2211 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2212#ifdef DEBUG_TB_CHECK
2213 tb_invalidate_check(address);
2214#endif
c8a706fe 2215 mmap_unlock();
9fa3e853
FB
2216 return 1;
2217 }
2218 }
c8a706fe 2219 mmap_unlock();
9fa3e853
FB
2220 return 0;
2221}
2222
6a00d601
FB
2223static inline void tlb_set_dirty(CPUState *env,
2224 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2225{
2226}
9fa3e853
FB
2227#endif /* defined(CONFIG_USER_ONLY) */
2228
e2eef170 2229#if !defined(CONFIG_USER_ONLY)
8da3ff18 2230
db7b5426 2231static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2232 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2233static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2234 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2235#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2236 need_subpage) \
2237 do { \
2238 if (addr > start_addr) \
2239 start_addr2 = 0; \
2240 else { \
2241 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2242 if (start_addr2 > 0) \
2243 need_subpage = 1; \
2244 } \
2245 \
49e9fba2 2246 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2247 end_addr2 = TARGET_PAGE_SIZE - 1; \
2248 else { \
2249 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2250 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2251 need_subpage = 1; \
2252 } \
2253 } while (0)
2254
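/* Worked example for the macro above (illustrative, assuming 4 KB target
   pages): registering start_addr = 0x10000800 with size 0x400.  On the only
   loop iteration addr == start_addr, so start_addr2 = start_addr &
   ~TARGET_PAGE_MASK = 0x800 (> 0, need_subpage = 1); and since
   (start_addr + orig_size) - addr = 0x400 < TARGET_PAGE_SIZE,
   end_addr2 = 0xbff (< TARGET_PAGE_SIZE - 1, need_subpage = 1).  Only
   bytes 0x800..0xbff of the page at 0x10000000 are claimed, so the
   registration is routed through the subpage machinery below. */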
33417e70
FB
2255/* register physical memory. 'size' must be a multiple of the target
2256 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2257 io memory page. The address used when calling the IO function is
2258 the offset from the start of the region, plus region_offset. Both
2259   start_addr and region_offset are rounded down to a page boundary
2260 before calculating this offset. This should not be a problem unless
2261 the low bits of start_addr and region_offset differ. */
2262void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2263 ram_addr_t size,
2264 ram_addr_t phys_offset,
2265 ram_addr_t region_offset)
33417e70 2266{
108c49b8 2267 target_phys_addr_t addr, end_addr;
92e873b9 2268 PhysPageDesc *p;
9d42037b 2269 CPUState *env;
00f82b8a 2270 ram_addr_t orig_size = size;
db7b5426 2271 void *subpage;
33417e70 2272
da260249
FB
2273#ifdef USE_KQEMU
2274 /* XXX: should not depend on cpu context */
2275 env = first_cpu;
2276 if (env->kqemu_enabled) {
2277 kqemu_set_phys_mem(start_addr, size, phys_offset);
2278 }
2279#endif
7ba1e619
AL
2280 if (kvm_enabled())
2281 kvm_set_phys_mem(start_addr, size, phys_offset);
2282
8da3ff18 2283 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2284 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2285 end_addr = start_addr + (target_phys_addr_t)size;
2286 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2287 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2288 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2289 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2290 target_phys_addr_t start_addr2, end_addr2;
2291 int need_subpage = 0;
2292
2293 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2294 need_subpage);
4254fab8 2295 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2296 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2297 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2298 &p->phys_offset, orig_memory,
2299 p->region_offset);
db7b5426
BS
2300 } else {
2301 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2302 >> IO_MEM_SHIFT];
2303 }
8da3ff18
PB
2304 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2305 region_offset);
2306 p->region_offset = 0;
db7b5426
BS
2307 } else {
2308 p->phys_offset = phys_offset;
2309 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2310 (phys_offset & IO_MEM_ROMD))
2311 phys_offset += TARGET_PAGE_SIZE;
2312 }
2313 } else {
2314 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2315 p->phys_offset = phys_offset;
8da3ff18 2316 p->region_offset = region_offset;
db7b5426 2317 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2318 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2319 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2320 } else {
db7b5426
BS
2321 target_phys_addr_t start_addr2, end_addr2;
2322 int need_subpage = 0;
2323
2324 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2325 end_addr2, need_subpage);
2326
4254fab8 2327 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2328 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2329 &p->phys_offset, IO_MEM_UNASSIGNED,
2330 0);
db7b5426 2331 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2332 phys_offset, region_offset);
2333 p->region_offset = 0;
db7b5426
BS
2334 }
2335 }
2336 }
8da3ff18 2337 region_offset += TARGET_PAGE_SIZE;
33417e70 2338 }
3b46e624 2339
9d42037b
FB
2340 /* since each CPU stores ram addresses in its TLB cache, we must
2341 reset the modified entries */
2342 /* XXX: slow ! */
2343 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2344 tlb_flush(env, 1);
2345 }
33417e70
FB
2346}
2347
ba863458 2348/* XXX: temporary until new memory mapping API */
00f82b8a 2349ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2350{
2351 PhysPageDesc *p;
2352
2353 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2354 if (!p)
2355 return IO_MEM_UNASSIGNED;
2356 return p->phys_offset;
2357}
2358
f65ed4c1
AL
2359void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2360{
2361 if (kvm_enabled())
2362 kvm_coalesce_mmio_region(addr, size);
2363}
2364
2365void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2366{
2367 if (kvm_enabled())
2368 kvm_uncoalesce_mmio_region(addr, size);
2369}
2370
e9a1ab19 2371/* XXX: better than nothing */
00f82b8a 2372ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2373{
2374 ram_addr_t addr;
7fb4fdcf 2375 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2376 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2377 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2378 abort();
2379 }
2380 addr = phys_ram_alloc_offset;
2381 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2382 return addr;
2383}
2384
2385void qemu_ram_free(ram_addr_t addr)
2386{
2387}
2388
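/* Illustrative sketch (not part of the original file): how a machine model
   might allocate guest RAM with qemu_ram_alloc() and map it into the
   physical address space.  The base address and size are hypothetical;
   cpu_register_physical_memory() is assumed to be the usual three-argument
   wrapper that passes a zero region_offset. */
#if 0
static void example_map_main_ram(void)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(0x2000000);              /* 32 MB of guest RAM */
    cpu_register_physical_memory(0x00000000, 0x2000000,  /* map it at phys 0   */
                                 ram_offset | IO_MEM_RAM);
}
#endif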
a4193c8a 2389static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2390{
67d3b957 2391#ifdef DEBUG_UNASSIGNED
ab3d1727 2392 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2393#endif
0a6f8a6d 2394#if defined(TARGET_SPARC)
e18231a3
BS
2395 do_unassigned_access(addr, 0, 0, 0, 1);
2396#endif
2397 return 0;
2398}
2399
2400static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2401{
2402#ifdef DEBUG_UNASSIGNED
2403 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2404#endif
0a6f8a6d 2405#if defined(TARGET_SPARC)
e18231a3
BS
2406 do_unassigned_access(addr, 0, 0, 0, 2);
2407#endif
2408 return 0;
2409}
2410
2411static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2412{
2413#ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415#endif
0a6f8a6d 2416#if defined(TARGET_SPARC)
e18231a3 2417 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2418#endif
33417e70
FB
2419 return 0;
2420}
2421
a4193c8a 2422static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2423{
67d3b957 2424#ifdef DEBUG_UNASSIGNED
ab3d1727 2425 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2426#endif
0a6f8a6d 2427#if defined(TARGET_SPARC)
e18231a3
BS
2428 do_unassigned_access(addr, 1, 0, 0, 1);
2429#endif
2430}
2431
2432static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2433{
2434#ifdef DEBUG_UNASSIGNED
2435 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2436#endif
0a6f8a6d 2437#if defined(TARGET_SPARC)
e18231a3
BS
2438 do_unassigned_access(addr, 1, 0, 0, 2);
2439#endif
2440}
2441
2442static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2443{
2444#ifdef DEBUG_UNASSIGNED
2445 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2446#endif
0a6f8a6d 2447#if defined(TARGET_SPARC)
e18231a3 2448 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2449#endif
33417e70
FB
2450}
2451
2452static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2453 unassigned_mem_readb,
e18231a3
BS
2454 unassigned_mem_readw,
2455 unassigned_mem_readl,
33417e70
FB
2456};
2457
2458static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2459 unassigned_mem_writeb,
e18231a3
BS
2460 unassigned_mem_writew,
2461 unassigned_mem_writel,
33417e70
FB
2462};
2463
0f459d16
PB
2464static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2465 uint32_t val)
9fa3e853 2466{
3a7d929e 2467 int dirty_flags;
3a7d929e
FB
2468 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2469 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2470#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2471 tb_invalidate_phys_page_fast(ram_addr, 1);
2472 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2473#endif
3a7d929e 2474 }
0f459d16 2475 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2476#ifdef USE_KQEMU
2477 if (cpu_single_env->kqemu_enabled &&
2478 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2479 kqemu_modify_page(cpu_single_env, ram_addr);
2480#endif
f23db169
FB
2481 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2482 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2483 /* we remove the notdirty callback only if the code has been
2484 flushed */
2485 if (dirty_flags == 0xff)
2e70f6ef 2486 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2487}
2488
0f459d16
PB
2489static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2490 uint32_t val)
9fa3e853 2491{
3a7d929e 2492 int dirty_flags;
3a7d929e
FB
2493 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2494 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2495#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2496 tb_invalidate_phys_page_fast(ram_addr, 2);
2497 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2498#endif
3a7d929e 2499 }
0f459d16 2500 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2501#ifdef USE_KQEMU
2502 if (cpu_single_env->kqemu_enabled &&
2503 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2504 kqemu_modify_page(cpu_single_env, ram_addr);
2505#endif
f23db169
FB
2506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2507 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2508 /* we remove the notdirty callback only if the code has been
2509 flushed */
2510 if (dirty_flags == 0xff)
2e70f6ef 2511 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2512}
2513
0f459d16
PB
2514static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2515 uint32_t val)
9fa3e853 2516{
3a7d929e 2517 int dirty_flags;
3a7d929e
FB
2518 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2519 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2520#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2521 tb_invalidate_phys_page_fast(ram_addr, 4);
2522 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2523#endif
3a7d929e 2524 }
0f459d16 2525 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2526#ifdef USE_KQEMU
2527 if (cpu_single_env->kqemu_enabled &&
2528 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2529 kqemu_modify_page(cpu_single_env, ram_addr);
2530#endif
f23db169
FB
2531 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2532 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2533 /* we remove the notdirty callback only if the code has been
2534 flushed */
2535 if (dirty_flags == 0xff)
2e70f6ef 2536 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2537}
2538
3a7d929e 2539static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2540 NULL, /* never used */
2541 NULL, /* never used */
2542 NULL, /* never used */
2543};
2544
1ccde1cb
FB
2545static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2546 notdirty_mem_writeb,
2547 notdirty_mem_writew,
2548 notdirty_mem_writel,
2549};
2550
0f459d16 2551/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2552static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2553{
2554 CPUState *env = cpu_single_env;
06d55cc1
AL
2555 target_ulong pc, cs_base;
2556 TranslationBlock *tb;
0f459d16 2557 target_ulong vaddr;
a1d1bb31 2558 CPUWatchpoint *wp;
06d55cc1 2559 int cpu_flags;
0f459d16 2560
06d55cc1
AL
2561 if (env->watchpoint_hit) {
2562 /* We re-entered the check after replacing the TB. Now raise
2563         * the debug interrupt so that it will trigger after the
2564 * current instruction. */
2565 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2566 return;
2567 }
2e70f6ef 2568 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2569 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2570 if ((vaddr == (wp->vaddr & len_mask) ||
2571 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2572 wp->flags |= BP_WATCHPOINT_HIT;
2573 if (!env->watchpoint_hit) {
2574 env->watchpoint_hit = wp;
2575 tb = tb_find_pc(env->mem_io_pc);
2576 if (!tb) {
2577 cpu_abort(env, "check_watchpoint: could not find TB for "
2578 "pc=%p", (void *)env->mem_io_pc);
2579 }
2580 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2581 tb_phys_invalidate(tb, -1);
2582 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2583 env->exception_index = EXCP_DEBUG;
2584 } else {
2585 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2586 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2587 }
2588 cpu_resume_from_signal(env, NULL);
06d55cc1 2589 }
6e140f28
AL
2590 } else {
2591 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2592 }
2593 }
2594}
2595
6658ffb8
PB
2596/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2597 so these check for a hit then pass through to the normal out-of-line
2598 phys routines. */
2599static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2600{
b4051334 2601 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2602 return ldub_phys(addr);
2603}
2604
2605static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2606{
b4051334 2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2608 return lduw_phys(addr);
2609}
2610
2611static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2612{
b4051334 2613 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2614 return ldl_phys(addr);
2615}
2616
6658ffb8
PB
2617static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2618 uint32_t val)
2619{
b4051334 2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2621 stb_phys(addr, val);
2622}
2623
2624static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2625 uint32_t val)
2626{
b4051334 2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2628 stw_phys(addr, val);
2629}
2630
2631static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2632 uint32_t val)
2633{
b4051334 2634 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2635 stl_phys(addr, val);
2636}
2637
2638static CPUReadMemoryFunc *watch_mem_read[3] = {
2639 watch_mem_readb,
2640 watch_mem_readw,
2641 watch_mem_readl,
2642};
2643
2644static CPUWriteMemoryFunc *watch_mem_write[3] = {
2645 watch_mem_writeb,
2646 watch_mem_writew,
2647 watch_mem_writel,
2648};
6658ffb8 2649
db7b5426
BS
2650static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2651 unsigned int len)
2652{
db7b5426
BS
2653 uint32_t ret;
2654 unsigned int idx;
2655
8da3ff18 2656 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2657#if defined(DEBUG_SUBPAGE)
2658 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2659 mmio, len, addr, idx);
2660#endif
8da3ff18
PB
2661 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2662 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2663
2664 return ret;
2665}
2666
2667static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2668 uint32_t value, unsigned int len)
2669{
db7b5426
BS
2670 unsigned int idx;
2671
8da3ff18 2672 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2673#if defined(DEBUG_SUBPAGE)
2674 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2675 mmio, len, addr, idx, value);
2676#endif
8da3ff18
PB
2677 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2678 addr + mmio->region_offset[idx][1][len],
2679 value);
db7b5426
BS
2680}
2681
2682static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2683{
2684#if defined(DEBUG_SUBPAGE)
2685 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2686#endif
2687
2688 return subpage_readlen(opaque, addr, 0);
2689}
2690
2691static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2692 uint32_t value)
2693{
2694#if defined(DEBUG_SUBPAGE)
2695 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2696#endif
2697 subpage_writelen(opaque, addr, value, 0);
2698}
2699
2700static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2701{
2702#if defined(DEBUG_SUBPAGE)
2703 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2704#endif
2705
2706 return subpage_readlen(opaque, addr, 1);
2707}
2708
2709static void subpage_writew (void *opaque, target_phys_addr_t addr,
2710 uint32_t value)
2711{
2712#if defined(DEBUG_SUBPAGE)
2713 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2714#endif
2715 subpage_writelen(opaque, addr, value, 1);
2716}
2717
2718static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2719{
2720#if defined(DEBUG_SUBPAGE)
2721 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2722#endif
2723
2724 return subpage_readlen(opaque, addr, 2);
2725}
2726
2727static void subpage_writel (void *opaque,
2728 target_phys_addr_t addr, uint32_t value)
2729{
2730#if defined(DEBUG_SUBPAGE)
2731 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2732#endif
2733 subpage_writelen(opaque, addr, value, 2);
2734}
2735
2736static CPUReadMemoryFunc *subpage_read[] = {
2737 &subpage_readb,
2738 &subpage_readw,
2739 &subpage_readl,
2740};
2741
2742static CPUWriteMemoryFunc *subpage_write[] = {
2743 &subpage_writeb,
2744 &subpage_writew,
2745 &subpage_writel,
2746};
2747
2748static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2749 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2750{
2751 int idx, eidx;
4254fab8 2752 unsigned int i;
db7b5426
BS
2753
2754 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2755 return -1;
2756 idx = SUBPAGE_IDX(start);
2757 eidx = SUBPAGE_IDX(end);
2758#if defined(DEBUG_SUBPAGE)
2759 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2760 mmio, start, end, idx, eidx, memory);
2761#endif
2762 memory >>= IO_MEM_SHIFT;
2763 for (; idx <= eidx; idx++) {
4254fab8 2764 for (i = 0; i < 4; i++) {
3ee89922
BS
2765 if (io_mem_read[memory][i]) {
2766 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2767 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2768 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2769 }
2770 if (io_mem_write[memory][i]) {
2771 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2772 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2773 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2774 }
4254fab8 2775 }
db7b5426
BS
2776 }
2777
2778 return 0;
2779}
2780
00f82b8a 2781static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2782 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2783{
2784 subpage_t *mmio;
2785 int subpage_memory;
2786
2787 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2788
2789 mmio->base = base;
2790 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2791#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2792 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2793 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2794#endif
1eec614b
AL
2795 *phys = subpage_memory | IO_MEM_SUBPAGE;
2796 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2797 region_offset);
db7b5426
BS
2798
2799 return mmio;
2800}
2801
88715657
AL
2802static int get_free_io_mem_idx(void)
2803{
2804 int i;
2805
2806 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2807 if (!io_mem_used[i]) {
2808 io_mem_used[i] = 1;
2809 return i;
2810 }
2811
2812 return -1;
2813}
2814
33417e70
FB
2815static void io_mem_init(void)
2816{
88715657
AL
2817 int i;
2818
3a7d929e 2819 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2820 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2821 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2822 for (i=0; i<5; i++)
2823 io_mem_used[i] = 1;
1ccde1cb 2824
0f459d16 2825 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2826 watch_mem_write, NULL);
1ccde1cb 2827 /* alloc dirty bits array */
0a962c02 2828 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2829 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2830}
2831
2832/* mem_read and mem_write are arrays of functions containing the
2833 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2834 2). Functions can be omitted with a NULL function pointer. The
2835 registered functions may be modified dynamically later.
2836 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2837 modified. If it is zero, a new io zone is allocated. The return
2838 value can be used with cpu_register_physical_memory(). (-1) is
2839 returned if error. */
33417e70
FB
2840int cpu_register_io_memory(int io_index,
2841 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2842 CPUWriteMemoryFunc **mem_write,
2843 void *opaque)
33417e70 2844{
4254fab8 2845 int i, subwidth = 0;
33417e70
FB
2846
2847 if (io_index <= 0) {
88715657
AL
2848 io_index = get_free_io_mem_idx();
2849 if (io_index == -1)
2850 return io_index;
33417e70
FB
2851 } else {
2852 if (io_index >= IO_MEM_NB_ENTRIES)
2853 return -1;
2854 }
b5ff1b31 2855
33417e70 2856 for(i = 0;i < 3; i++) {
4254fab8
BS
2857 if (!mem_read[i] || !mem_write[i])
2858 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2859 io_mem_read[io_index][i] = mem_read[i];
2860 io_mem_write[io_index][i] = mem_write[i];
2861 }
a4193c8a 2862 io_mem_opaque[io_index] = opaque;
4254fab8 2863 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2864}
61382a50 2865
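/* Illustrative sketch (not part of the original file): registering a small
   memory-mapped device with the I/O table above and then mapping it into the
   physical address space.  The callbacks, opaque state, base address and
   size are hypothetical. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* addr is the offset from the start of the region (plus region_offset). */
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* hypothetical device register write */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,   /* 32-bit access only: IO_MEM_SUBWIDTH is set */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_map_device(void *dev_state)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, example_dev_read,
                                       example_dev_write, dev_state);
    cpu_register_physical_memory_offset(0xfe000000, 0x1000, iomemtype, 0);
}
#endif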
88715657
AL
2866void cpu_unregister_io_memory(int io_table_address)
2867{
2868 int i;
2869 int io_index = io_table_address >> IO_MEM_SHIFT;
2870
2871 for (i=0;i < 3; i++) {
2872 io_mem_read[io_index][i] = unassigned_mem_read[i];
2873 io_mem_write[io_index][i] = unassigned_mem_write[i];
2874 }
2875 io_mem_opaque[io_index] = NULL;
2876 io_mem_used[io_index] = 0;
2877}
2878
8926b517
FB
2879CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2880{
2881 return io_mem_write[io_index >> IO_MEM_SHIFT];
2882}
2883
2884CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2885{
2886 return io_mem_read[io_index >> IO_MEM_SHIFT];
2887}
2888
e2eef170
PB
2889#endif /* !defined(CONFIG_USER_ONLY) */
2890
13eb76e0
FB
2891/* physical memory access (slow version, mainly for debug) */
2892#if defined(CONFIG_USER_ONLY)
5fafdf24 2893void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2894 int len, int is_write)
2895{
2896 int l, flags;
2897 target_ulong page;
53a5960a 2898 void * p;
13eb76e0
FB
2899
2900 while (len > 0) {
2901 page = addr & TARGET_PAGE_MASK;
2902 l = (page + TARGET_PAGE_SIZE) - addr;
2903 if (l > len)
2904 l = len;
2905 flags = page_get_flags(page);
2906 if (!(flags & PAGE_VALID))
2907 return;
2908 if (is_write) {
2909 if (!(flags & PAGE_WRITE))
2910 return;
579a97f7 2911 /* XXX: this code should not depend on lock_user */
72fb7daa 2912 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2913 /* FIXME - should this return an error rather than just fail? */
2914 return;
72fb7daa
AJ
2915 memcpy(p, buf, l);
2916 unlock_user(p, addr, l);
13eb76e0
FB
2917 } else {
2918 if (!(flags & PAGE_READ))
2919 return;
579a97f7 2920 /* XXX: this code should not depend on lock_user */
72fb7daa 2921 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2922 /* FIXME - should this return an error rather than just fail? */
2923 return;
72fb7daa 2924 memcpy(buf, p, l);
5b257578 2925 unlock_user(p, addr, 0);
13eb76e0
FB
2926 }
2927 len -= l;
2928 buf += l;
2929 addr += l;
2930 }
2931}
8df1cd07 2932
13eb76e0 2933#else
5fafdf24 2934void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2935 int len, int is_write)
2936{
2937 int l, io_index;
2938 uint8_t *ptr;
2939 uint32_t val;
2e12669a
FB
2940 target_phys_addr_t page;
2941 unsigned long pd;
92e873b9 2942 PhysPageDesc *p;
3b46e624 2943
13eb76e0
FB
2944 while (len > 0) {
2945 page = addr & TARGET_PAGE_MASK;
2946 l = (page + TARGET_PAGE_SIZE) - addr;
2947 if (l > len)
2948 l = len;
92e873b9 2949 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2950 if (!p) {
2951 pd = IO_MEM_UNASSIGNED;
2952 } else {
2953 pd = p->phys_offset;
2954 }
3b46e624 2955
13eb76e0 2956 if (is_write) {
3a7d929e 2957 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2958 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2959 if (p)
2960 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2961 /* XXX: could force cpu_single_env to NULL to avoid
2962 potential bugs */
13eb76e0 2963 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2964 /* 32 bit write access */
c27004ec 2965 val = ldl_p(buf);
a4193c8a 2966 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2967 l = 4;
2968 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2969 /* 16 bit write access */
c27004ec 2970 val = lduw_p(buf);
a4193c8a 2971 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2972 l = 2;
2973 } else {
1c213d19 2974 /* 8 bit write access */
c27004ec 2975 val = ldub_p(buf);
a4193c8a 2976 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2977 l = 1;
2978 }
2979 } else {
b448f2f3
FB
2980 unsigned long addr1;
2981 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2982 /* RAM case */
b448f2f3 2983 ptr = phys_ram_base + addr1;
13eb76e0 2984 memcpy(ptr, buf, l);
3a7d929e
FB
2985 if (!cpu_physical_memory_is_dirty(addr1)) {
2986 /* invalidate code */
2987 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2988 /* set dirty bit */
5fafdf24 2989 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2990 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2991 }
13eb76e0
FB
2992 }
2993 } else {
5fafdf24 2994 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2995 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2996 /* I/O case */
2997 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2998 if (p)
2999 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
13eb76e0
FB
3000 if (l >= 4 && ((addr & 3) == 0)) {
3001 /* 32 bit read access */
a4193c8a 3002 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 3003 stl_p(buf, val);
13eb76e0
FB
3004 l = 4;
3005 } else if (l >= 2 && ((addr & 1) == 0)) {
3006 /* 16 bit read access */
a4193c8a 3007 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 3008 stw_p(buf, val);
13eb76e0
FB
3009 l = 2;
3010 } else {
1c213d19 3011 /* 8 bit read access */
a4193c8a 3012 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 3013 stb_p(buf, val);
13eb76e0
FB
3014 l = 1;
3015 }
3016 } else {
3017 /* RAM case */
5fafdf24 3018 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3019 (addr & ~TARGET_PAGE_MASK);
3020 memcpy(buf, ptr, l);
3021 }
3022 }
3023 len -= l;
3024 buf += l;
3025 addr += l;
3026 }
3027}
8df1cd07 3028
d0ecd2aa 3029/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3030void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3031 const uint8_t *buf, int len)
3032{
3033 int l;
3034 uint8_t *ptr;
3035 target_phys_addr_t page;
3036 unsigned long pd;
3037 PhysPageDesc *p;
3b46e624 3038
d0ecd2aa
FB
3039 while (len > 0) {
3040 page = addr & TARGET_PAGE_MASK;
3041 l = (page + TARGET_PAGE_SIZE) - addr;
3042 if (l > len)
3043 l = len;
3044 p = phys_page_find(page >> TARGET_PAGE_BITS);
3045 if (!p) {
3046 pd = IO_MEM_UNASSIGNED;
3047 } else {
3048 pd = p->phys_offset;
3049 }
3b46e624 3050
d0ecd2aa 3051 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3052 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3053 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3054 /* do nothing */
3055 } else {
3056 unsigned long addr1;
3057 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3058 /* ROM/RAM case */
3059 ptr = phys_ram_base + addr1;
3060 memcpy(ptr, buf, l);
3061 }
3062 len -= l;
3063 buf += l;
3064 addr += l;
3065 }
3066}
3067
6d16c2f8
AL
3068typedef struct {
3069 void *buffer;
3070 target_phys_addr_t addr;
3071 target_phys_addr_t len;
3072} BounceBuffer;
3073
3074static BounceBuffer bounce;
3075
ba223c29
AL
3076typedef struct MapClient {
3077 void *opaque;
3078 void (*callback)(void *opaque);
3079 LIST_ENTRY(MapClient) link;
3080} MapClient;
3081
3082static LIST_HEAD(map_client_list, MapClient) map_client_list
3083 = LIST_HEAD_INITIALIZER(map_client_list);
3084
3085void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3086{
3087 MapClient *client = qemu_malloc(sizeof(*client));
3088
3089 client->opaque = opaque;
3090 client->callback = callback;
3091 LIST_INSERT_HEAD(&map_client_list, client, link);
3092 return client;
3093}
3094
3095void cpu_unregister_map_client(void *_client)
3096{
3097 MapClient *client = (MapClient *)_client;
3098
3099 LIST_REMOVE(client, link);
3100}
3101
3102static void cpu_notify_map_clients(void)
3103{
3104 MapClient *client;
3105
3106 while (!LIST_EMPTY(&map_client_list)) {
3107 client = LIST_FIRST(&map_client_list);
3108 client->callback(client->opaque);
3109 LIST_REMOVE(client, link);
3110 }
3111}
3112
6d16c2f8
AL
3113/* Map a physical memory region into a host virtual address.
3114 * May map a subset of the requested range, given by and returned in *plen.
3115 * May return NULL if resources needed to perform the mapping are exhausted.
3116 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3117 * Use cpu_register_map_client() to know when retrying the map operation is
3118 * likely to succeed.
6d16c2f8
AL
3119 */
3120void *cpu_physical_memory_map(target_phys_addr_t addr,
3121 target_phys_addr_t *plen,
3122 int is_write)
3123{
3124 target_phys_addr_t len = *plen;
3125 target_phys_addr_t done = 0;
3126 int l;
3127 uint8_t *ret = NULL;
3128 uint8_t *ptr;
3129 target_phys_addr_t page;
3130 unsigned long pd;
3131 PhysPageDesc *p;
3132 unsigned long addr1;
3133
3134 while (len > 0) {
3135 page = addr & TARGET_PAGE_MASK;
3136 l = (page + TARGET_PAGE_SIZE) - addr;
3137 if (l > len)
3138 l = len;
3139 p = phys_page_find(page >> TARGET_PAGE_BITS);
3140 if (!p) {
3141 pd = IO_MEM_UNASSIGNED;
3142 } else {
3143 pd = p->phys_offset;
3144 }
3145
3146 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3147 if (done || bounce.buffer) {
3148 break;
3149 }
3150 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3151 bounce.addr = addr;
3152 bounce.len = l;
3153 if (!is_write) {
3154 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3155 }
3156 ptr = bounce.buffer;
3157 } else {
3158 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3159 ptr = phys_ram_base + addr1;
3160 }
3161 if (!done) {
3162 ret = ptr;
3163 } else if (ret + done != ptr) {
3164 break;
3165 }
3166
3167 len -= l;
3168 addr += l;
3169 done += l;
3170 }
3171 *plen = done;
3172 return ret;
3173}
3174
3175/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3176 * Will also mark the memory as dirty if is_write == 1. access_len gives
3177 * the amount of memory that was actually read or written by the caller.
3178 */
3179void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3180 int is_write, target_phys_addr_t access_len)
3181{
3182 if (buffer != bounce.buffer) {
3183 if (is_write) {
3184 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3185 while (access_len) {
3186 unsigned l;
3187 l = TARGET_PAGE_SIZE;
3188 if (l > access_len)
3189 l = access_len;
3190 if (!cpu_physical_memory_is_dirty(addr1)) {
3191 /* invalidate code */
3192 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3193 /* set dirty bit */
3194 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3195 (0xff & ~CODE_DIRTY_FLAG);
3196 }
3197 addr1 += l;
3198 access_len -= l;
3199 }
3200 }
3201 return;
3202 }
3203 if (is_write) {
3204 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3205 }
3206 qemu_free(bounce.buffer);
3207 bounce.buffer = NULL;
ba223c29 3208 cpu_notify_map_clients();
6d16c2f8 3209}
d0ecd2aa 3210
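/* Illustrative sketch (not part of the original file): the intended
   map -> access -> unmap pattern for the two functions above.
   do_dma_to_buffer() and the addresses are hypothetical; when NULL is
   returned because the bounce buffer is busy, cpu_register_map_client()
   (above) can be used to learn when to retry. */
#if 0
static int example_dma_read(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *mem;

    mem = cpu_physical_memory_map(addr, &plen, 0 /* is_write: mapping for reads */);
    if (!mem)
        return -1;   /* resources exhausted: register a map client and retry */

    do_dma_to_buffer(mem, plen);   /* hypothetical consumer of the guest data */

    /* access_len tells unmap how much was really touched; is_write == 0,
       so no pages are dirtied here. */
    cpu_physical_memory_unmap(mem, plen, 0, plen);
    return plen == len ? 0 : 1;    /* caller must loop if only part was mapped */
}
#endif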
8df1cd07
FB
3211/* warning: addr must be aligned */
3212uint32_t ldl_phys(target_phys_addr_t addr)
3213{
3214 int io_index;
3215 uint8_t *ptr;
3216 uint32_t val;
3217 unsigned long pd;
3218 PhysPageDesc *p;
3219
3220 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3221 if (!p) {
3222 pd = IO_MEM_UNASSIGNED;
3223 } else {
3224 pd = p->phys_offset;
3225 }
3b46e624 3226
5fafdf24 3227 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3228 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3229 /* I/O case */
3230 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3231 if (p)
3232 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3233 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3234 } else {
3235 /* RAM case */
5fafdf24 3236 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3237 (addr & ~TARGET_PAGE_MASK);
3238 val = ldl_p(ptr);
3239 }
3240 return val;
3241}
3242
84b7b8e7
FB
3243/* warning: addr must be aligned */
3244uint64_t ldq_phys(target_phys_addr_t addr)
3245{
3246 int io_index;
3247 uint8_t *ptr;
3248 uint64_t val;
3249 unsigned long pd;
3250 PhysPageDesc *p;
3251
3252 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3253 if (!p) {
3254 pd = IO_MEM_UNASSIGNED;
3255 } else {
3256 pd = p->phys_offset;
3257 }
3b46e624 3258
2a4188a3
FB
3259 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3260 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3261 /* I/O case */
3262 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3263 if (p)
3264 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3265#ifdef TARGET_WORDS_BIGENDIAN
3266 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3267 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3268#else
3269 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3270 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3271#endif
3272 } else {
3273 /* RAM case */
5fafdf24 3274 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3275 (addr & ~TARGET_PAGE_MASK);
3276 val = ldq_p(ptr);
3277 }
3278 return val;
3279}
3280
aab33094
FB
3281/* XXX: optimize */
3282uint32_t ldub_phys(target_phys_addr_t addr)
3283{
3284 uint8_t val;
3285 cpu_physical_memory_read(addr, &val, 1);
3286 return val;
3287}
3288
3289/* XXX: optimize */
3290uint32_t lduw_phys(target_phys_addr_t addr)
3291{
3292 uint16_t val;
3293 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3294 return tswap16(val);
3295}
3296
8df1cd07
FB
3297/* warning: addr must be aligned. The ram page is not masked as dirty
3298 and the code inside is not invalidated. It is useful if the dirty
3299 bits are used to track modified PTEs */
3300void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3301{
3302 int io_index;
3303 uint8_t *ptr;
3304 unsigned long pd;
3305 PhysPageDesc *p;
3306
3307 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3308 if (!p) {
3309 pd = IO_MEM_UNASSIGNED;
3310 } else {
3311 pd = p->phys_offset;
3312 }
3b46e624 3313
3a7d929e 3314 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3316 if (p)
3317 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3318 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3319 } else {
74576198
AL
3320 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3321 ptr = phys_ram_base + addr1;
8df1cd07 3322 stl_p(ptr, val);
74576198
AL
3323
3324 if (unlikely(in_migration)) {
3325 if (!cpu_physical_memory_is_dirty(addr1)) {
3326 /* invalidate code */
3327 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3328 /* set dirty bit */
3329 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3330 (0xff & ~CODE_DIRTY_FLAG);
3331 }
3332 }
8df1cd07
FB
3333 }
3334}
3335
bc98a7ef
JM
3336void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3337{
3338 int io_index;
3339 uint8_t *ptr;
3340 unsigned long pd;
3341 PhysPageDesc *p;
3342
3343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3344 if (!p) {
3345 pd = IO_MEM_UNASSIGNED;
3346 } else {
3347 pd = p->phys_offset;
3348 }
3b46e624 3349
bc98a7ef
JM
3350 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3351 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3352 if (p)
3353 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3354#ifdef TARGET_WORDS_BIGENDIAN
3355 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3356 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3357#else
3358 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3359 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3360#endif
3361 } else {
5fafdf24 3362 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3363 (addr & ~TARGET_PAGE_MASK);
3364 stq_p(ptr, val);
3365 }
3366}
3367
8df1cd07 3368/* warning: addr must be aligned */
8df1cd07
FB
3369void stl_phys(target_phys_addr_t addr, uint32_t val)
3370{
3371 int io_index;
3372 uint8_t *ptr;
3373 unsigned long pd;
3374 PhysPageDesc *p;
3375
3376 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3377 if (!p) {
3378 pd = IO_MEM_UNASSIGNED;
3379 } else {
3380 pd = p->phys_offset;
3381 }
3b46e624 3382
3a7d929e 3383 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3384 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3385 if (p)
3386 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3387 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3388 } else {
3389 unsigned long addr1;
3390 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3391 /* RAM case */
3392 ptr = phys_ram_base + addr1;
3393 stl_p(ptr, val);
3a7d929e
FB
3394 if (!cpu_physical_memory_is_dirty(addr1)) {
3395 /* invalidate code */
3396 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3397 /* set dirty bit */
f23db169
FB
3398 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3399 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3400 }
8df1cd07
FB
3401 }
3402}
3403
aab33094
FB
3404/* XXX: optimize */
3405void stb_phys(target_phys_addr_t addr, uint32_t val)
3406{
3407 uint8_t v = val;
3408 cpu_physical_memory_write(addr, &v, 1);
3409}
3410
3411/* XXX: optimize */
3412void stw_phys(target_phys_addr_t addr, uint32_t val)
3413{
3414 uint16_t v = tswap16(val);
3415 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3416}
3417
3418/* XXX: optimize */
3419void stq_phys(target_phys_addr_t addr, uint64_t val)
3420{
3421 val = tswap64(val);
3422 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3423}
3424
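/* Illustrative sketch (not part of the original file): typical use of the
   ld*_phys/st*_phys helpers above.  The PTE address and the 0x20 "accessed"
   bit are hypothetical.  Note the difference between stl_phys(), which marks
   the page dirty and invalidates translated code, and stl_phys_notdirty(),
   which is meant for PTE-style updates where the dirty bits are being used
   to track guest page-table writes. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);          /* read a 32-bit PTE (addr must be aligned) */
    pte |= 0x20;                       /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);  /* write back without touching dirty flags */
}
#endif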
13eb76e0
FB
3425#endif
3426
3427/* virtual memory access for debug */
5fafdf24 3428int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3429 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3430{
3431 int l;
9b3c35e0
JM
3432 target_phys_addr_t phys_addr;
3433 target_ulong page;
13eb76e0
FB
3434
3435 while (len > 0) {
3436 page = addr & TARGET_PAGE_MASK;
3437 phys_addr = cpu_get_phys_page_debug(env, page);
3438 /* if no physical page mapped, return an error */
3439 if (phys_addr == -1)
3440 return -1;
3441 l = (page + TARGET_PAGE_SIZE) - addr;
3442 if (l > len)
3443 l = len;
5fafdf24 3444 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3445 buf, l, is_write);
13eb76e0
FB
3446 len -= l;
3447 buf += l;
3448 addr += l;
3449 }
3450 return 0;
3451}
3452
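/* Illustrative sketch (not part of the original file): how a debugger stub
   might read guest virtual memory with the helper above.  env, the guest
   address and the length are assumed to come from the caller. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* Returns -1 if any page in the range has no physical mapping. */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif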
2e70f6ef
PB
3453/* in deterministic execution mode, instructions doing device I/Os
3454 must be at the end of the TB */
3455void cpu_io_recompile(CPUState *env, void *retaddr)
3456{
3457 TranslationBlock *tb;
3458 uint32_t n, cflags;
3459 target_ulong pc, cs_base;
3460 uint64_t flags;
3461
3462 tb = tb_find_pc((unsigned long)retaddr);
3463 if (!tb) {
3464 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3465 retaddr);
3466 }
3467 n = env->icount_decr.u16.low + tb->icount;
3468 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3469 /* Calculate how many instructions had been executed before the fault
bf20dc07 3470 occurred. */
2e70f6ef
PB
3471 n = n - env->icount_decr.u16.low;
3472 /* Generate a new TB ending on the I/O insn. */
3473 n++;
3474 /* On MIPS and SH, delay slot instructions can only be restarted if
3475 they were already the first instruction in the TB. If this is not
bf20dc07 3476 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3477 branch. */
3478#if defined(TARGET_MIPS)
3479 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3480 env->active_tc.PC -= 4;
3481 env->icount_decr.u16.low++;
3482 env->hflags &= ~MIPS_HFLAG_BMASK;
3483 }
3484#elif defined(TARGET_SH4)
3485 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3486 && n > 1) {
3487 env->pc -= 2;
3488 env->icount_decr.u16.low++;
3489 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3490 }
3491#endif
3492 /* This should never happen. */
3493 if (n > CF_COUNT_MASK)
3494 cpu_abort(env, "TB too big during recompile");
3495
3496 cflags = n | CF_LAST_IO;
3497 pc = tb->pc;
3498 cs_base = tb->cs_base;
3499 flags = tb->flags;
3500 tb_phys_invalidate(tb, -1);
3501 /* FIXME: In theory this could raise an exception. In practice
3502 we have already translated the block once so it's probably ok. */
3503 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3504 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3505 the first in the TB) then we end up generating a whole new TB and
3506 repeating the fault, which is horribly inefficient.
3507 Better would be to execute just this insn uncached, or generate a
3508 second new TB. */
3509 cpu_resume_from_signal(env, NULL);
3510}
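/* Hedged sketch of the expected caller pattern (an assumption about the
 * softmmu helpers, not the actual code): when an I/O access is reached
 * mid-TB and cannot be completed deterministically, the memory helper
 * retranslates the current block so the access becomes its last
 * instruction.  "io_allowed_here" is a stand-in for the real icount
 * bookkeeping check. */
#if 0
static void example_io_write_helper(CPUState *env1, void *retaddr)
{
    if (!io_allowed_here(env1)) {
        /* Retranslates the TB with CF_LAST_IO and restarts execution via
         * cpu_resume_from_signal(), so this call does not return. */
        cpu_io_recompile(env1, retaddr);
    }
    /* ... otherwise forward the access to the device callback ... */
}
#endif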
3511
e3db7226
FB
3512void dump_exec_info(FILE *f,
3513 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3514{
3515 int i, target_code_size, max_target_code_size;
3516 int direct_jmp_count, direct_jmp2_count, cross_page;
3517 TranslationBlock *tb;
3b46e624 3518
e3db7226
FB
3519 target_code_size = 0;
3520 max_target_code_size = 0;
3521 cross_page = 0;
3522 direct_jmp_count = 0;
3523 direct_jmp2_count = 0;
3524 for(i = 0; i < nb_tbs; i++) {
3525 tb = &tbs[i];
3526 target_code_size += tb->size;
3527 if (tb->size > max_target_code_size)
3528 max_target_code_size = tb->size;
3529 if (tb->page_addr[1] != -1)
3530 cross_page++;
3531 if (tb->tb_next_offset[0] != 0xffff) {
3532 direct_jmp_count++;
3533 if (tb->tb_next_offset[1] != 0xffff) {
3534 direct_jmp2_count++;
3535 }
3536 }
3537 }
3538 /* XXX: avoid using doubles ? */
57fec1fe 3539 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3540 cpu_fprintf(f, "gen code size %ld/%ld\n",
3541 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3542 cpu_fprintf(f, "TB count %d/%d\n",
3543 nb_tbs, code_gen_max_blocks);
5fafdf24 3544 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3545 nb_tbs ? target_code_size / nb_tbs : 0,
3546 max_target_code_size);
5fafdf24 3547 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3548 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3549 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3550 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3551 cross_page,
e3db7226
FB
3552 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3553 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3554 direct_jmp_count,
e3db7226
FB
3555 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3556 direct_jmp2_count,
3557 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3558 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3559 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3560 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3561 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3562 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3563}
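/* Hedged usage sketch: dump_exec_info() takes an opaque FILE pointer plus an
 * fprintf-like callback, so the same routine can print to a stdio stream or
 * to a monitor printer.  Writing the JIT statistics to stderr is the
 * simplest case (fprintf has a compatible signature here). */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif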
3564
5fafdf24 3565#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3566
3567#define MMUSUFFIX _cmmu
3568#define GETPC() NULL
3569#define env cpu_single_env
b769d8fe 3570#define SOFTMMU_CODE_ACCESS
61382a50
FB
3571
3572#define SHIFT 0
3573#include "softmmu_template.h"
3574
3575#define SHIFT 1
3576#include "softmmu_template.h"
3577
3578#define SHIFT 2
3579#include "softmmu_template.h"
3580
3581#define SHIFT 3
3582#include "softmmu_template.h"
3583
3584#undef env
3585
3586#endif