]> git.proxmox.com Git - qemu.git/blame - exec.c
Fix texinfo syntax errors.
[qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
54936004 19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
51
52/* make various TB consistency checks */
5fafdf24
TS
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
99773bd4
PB
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
9fa3e853
FB
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
fd6ce8f6 68
108c49b8
FB
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
eb51d102
FB
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
141ac468
BS
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
9fa3e853
FB
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
6a00d601
FB
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
2e70f6ef
PB
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
8da3ff18 149 ram_addr_t region_offset;
92e873b9
FB
150} PhysPageDesc;
151
54936004 152#define L2_BITS 10
bedb69ea
JM
153#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154/* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
157 */
158#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159#else
03875444 160#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 161#endif
54936004
FB
162
163#define L1_SIZE (1 << L1_BITS)
164#define L2_SIZE (1 << L2_BITS)
165
83fb7adf
FB
166unsigned long qemu_real_host_page_size;
167unsigned long qemu_host_page_bits;
168unsigned long qemu_host_page_size;
169unsigned long qemu_host_page_mask;
54936004 170
92e873b9 171/* XXX: for system emulation, it could just be an array */
54936004 172static PageDesc *l1_map[L1_SIZE];
bdaf78e0 173static PhysPageDesc **l1_phys_map;
54936004 174
e2eef170
PB
175#if !defined(CONFIG_USER_ONLY)
176static void io_mem_init(void);
177
33417e70 178/* io memory support */
33417e70
FB
179CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 181void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 182static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
183static int io_mem_watch;
184#endif
33417e70 185
34865134 186/* log support */
d9b630fd 187static const char *logfilename = "/tmp/qemu.log";
34865134
FB
188FILE *logfile;
189int loglevel;
e735b91c 190static int log_append = 0;
34865134 191
e3db7226
FB
192/* statistics */
193static int tlb_flush_count;
194static int tb_flush_count;
195static int tb_phys_invalidate_count;
196
db7b5426
BS
197#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198typedef struct subpage_t {
199 target_phys_addr_t base;
3ee89922
BS
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
8da3ff18 203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
204} subpage_t;
205
7cb69cae
FB
206#ifdef _WIN32
207static void map_exec(void *addr, long size)
208{
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
212
213}
214#else
215static void map_exec(void *addr, long size)
216{
4369415f 217 unsigned long start, end, page_size;
7cb69cae 218
4369415f 219 page_size = getpagesize();
7cb69cae 220 start = (unsigned long)addr;
4369415f 221 start &= ~(page_size - 1);
7cb69cae
FB
222
223 end = (unsigned long)addr + size;
4369415f
FB
224 end += page_size - 1;
225 end &= ~(page_size - 1);
7cb69cae
FB
226
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
229}
230#endif
231
b346ff46 232static void page_init(void)
54936004 233{
83fb7adf 234 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 235 TARGET_PAGE_SIZE */
c2b48b69
AL
236#ifdef _WIN32
237 {
238 SYSTEM_INFO system_info;
239
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
242 }
243#else
244 qemu_real_host_page_size = getpagesize();
245#endif
83fb7adf
FB
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
256
257#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258 {
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
262
c8a706fe 263 mmap_lock();
0776590d 264 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
e0b8d65a
BS
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 274 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
277 }
278 } while (!feof(f));
279 fclose(f);
280 }
c8a706fe 281 mmap_unlock();
50a9569b
AZ
282 }
283#endif
54936004
FB
284}
285
434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
17e2377a
PB
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
292 return NULL;
293#endif
434929bf
AL
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
54936004
FB
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a 307#if defined(CONFIG_USER_ONLY)
17e2377a
PB
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 312 *lp = p;
fb1c2cd7
AJ
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
17e2377a
PB
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
318 }
319#else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322#endif
54936004
FB
323 }
324 return p + (index & (L2_SIZE - 1));
325}
326
00f82b8a 327static inline PageDesc *page_find(target_ulong index)
54936004 328{
434929bf
AL
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
54936004 333
434929bf 334 p = *lp;
54936004
FB
335 if (!p)
336 return 0;
fd6ce8f6
FB
337 return p + (index & (L2_SIZE - 1));
338}
339
108c49b8 340static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 341{
108c49b8 342 void **lp, **p;
e3f4e2a4 343 PhysPageDesc *pd;
92e873b9 344
108c49b8
FB
345 p = (void **)l1_phys_map;
346#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347
348#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350#endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
108c49b8
FB
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
360 }
361#endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
363 pd = *lp;
364 if (!pd) {
365 int i;
108c49b8
FB
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
e3f4e2a4
PB
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
67c4d23c 371 for (i = 0; i < L2_SIZE; i++) {
e3f4e2a4 372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
67c4d23c
PB
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
374 }
92e873b9 375 }
e3f4e2a4 376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
377}
378
108c49b8 379static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 380{
108c49b8 381 return phys_page_find_alloc(index, 0);
92e873b9
FB
382}
383
9fa3e853 384#if !defined(CONFIG_USER_ONLY)
6a00d601 385static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 386static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 387 target_ulong vaddr);
c8a706fe
PB
388#define mmap_lock() do { } while(0)
389#define mmap_unlock() do { } while(0)
9fa3e853 390#endif
fd6ce8f6 391
4369415f
FB
392#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393
394#if defined(CONFIG_USER_ONLY)
395/* Currently it is not recommanded to allocate big chunks of data in
396 user mode. It will change when a dedicated libc will be used */
397#define USE_STATIC_CODE_GEN_BUFFER
398#endif
399
400#ifdef USE_STATIC_CODE_GEN_BUFFER
401static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402#endif
403
8fcd3692 404static void code_gen_alloc(unsigned long tb_size)
26a5f13b 405{
4369415f
FB
406#ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410#else
26a5f13b
FB
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
4369415f
FB
413#if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416#else
26a5f13b 417 /* XXX: needs ajustments */
174a9a1f 418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 419#endif
26a5f13b
FB
420 }
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425#if defined(__linux__)
426 {
427 int flags;
141ac468
BS
428 void *start = NULL;
429
26a5f13b
FB
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431#if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
436#elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 442#elif defined(__arm__)
63d41246 443 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 448#endif
141ac468
BS
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
455 }
456 }
c5e97233 457#elif defined(__FreeBSD__) || defined(__DragonFly__)
06e67a82
AL
458 {
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462#if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470#endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
477 }
478 }
26a5f13b
FB
479#else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b
FB
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482#endif
4369415f 483#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489}
490
491/* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494void cpu_exec_init_all(unsigned long tb_size)
495{
26a5f13b
FB
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
4369415f 499 page_init();
e2eef170 500#if !defined(CONFIG_USER_ONLY)
26a5f13b 501 io_mem_init();
e2eef170 502#endif
26a5f13b
FB
503}
504
9656f324
PB
505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506
507#define CPU_COMMON_SAVE_VERSION 1
508
509static void cpu_common_save(QEMUFile *f, void *opaque)
510{
511 CPUState *env = opaque;
512
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
515}
516
517static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518{
519 CPUState *env = opaque;
520
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
523
524 qemu_get_be32s(f, &env->halted);
75f482ae 525 qemu_get_be32s(f, &env->interrupt_request);
e47ce3f2 526 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
9656f324
PB
527 tlb_flush(env, 1);
528
529 return 0;
530}
531#endif
532
6a00d601 533void cpu_exec_init(CPUState *env)
fd6ce8f6 534{
6a00d601
FB
535 CPUState **penv;
536 int cpu_index;
537
c2764719
PB
538#if defined(CONFIG_USER_ONLY)
539 cpu_list_lock();
540#endif
6a00d601
FB
541 env->next_cpu = NULL;
542 penv = &first_cpu;
543 cpu_index = 0;
544 while (*penv != NULL) {
545 penv = (CPUState **)&(*penv)->next_cpu;
546 cpu_index++;
547 }
548 env->cpu_index = cpu_index;
c0ce998e
AL
549 TAILQ_INIT(&env->breakpoints);
550 TAILQ_INIT(&env->watchpoints);
6a00d601 551 *penv = env;
c2764719
PB
552#if defined(CONFIG_USER_ONLY)
553 cpu_list_unlock();
554#endif
b3c7724c 555#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
556 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
557 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
558 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
559 cpu_save, cpu_load, env);
560#endif
fd6ce8f6
FB
561}
562
9fa3e853
FB
563static inline void invalidate_page_bitmap(PageDesc *p)
564{
565 if (p->code_bitmap) {
59817ccb 566 qemu_free(p->code_bitmap);
9fa3e853
FB
567 p->code_bitmap = NULL;
568 }
569 p->code_write_count = 0;
570}
571
fd6ce8f6
FB
572/* set to NULL all the 'first_tb' fields in all PageDescs */
573static void page_flush_tb(void)
574{
575 int i, j;
576 PageDesc *p;
577
578 for(i = 0; i < L1_SIZE; i++) {
579 p = l1_map[i];
580 if (p) {
9fa3e853
FB
581 for(j = 0; j < L2_SIZE; j++) {
582 p->first_tb = NULL;
583 invalidate_page_bitmap(p);
584 p++;
585 }
fd6ce8f6
FB
586 }
587 }
588}
589
590/* flush all the translation blocks */
d4e8164f 591/* XXX: tb_flush is currently not thread safe */
6a00d601 592void tb_flush(CPUState *env1)
fd6ce8f6 593{
6a00d601 594 CPUState *env;
0124311e 595#if defined(DEBUG_FLUSH)
ab3d1727
BS
596 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
597 (unsigned long)(code_gen_ptr - code_gen_buffer),
598 nb_tbs, nb_tbs > 0 ?
599 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 600#endif
26a5f13b 601 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
602 cpu_abort(env1, "Internal error: code buffer overflow\n");
603
fd6ce8f6 604 nb_tbs = 0;
3b46e624 605
6a00d601
FB
606 for(env = first_cpu; env != NULL; env = env->next_cpu) {
607 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
608 }
9fa3e853 609
8a8a608f 610 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 611 page_flush_tb();
9fa3e853 612
fd6ce8f6 613 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
614 /* XXX: flush processor icache at this point if cache flush is
615 expensive */
e3db7226 616 tb_flush_count++;
fd6ce8f6
FB
617}
618
619#ifdef DEBUG_TB_CHECK
620
bc98a7ef 621static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
622{
623 TranslationBlock *tb;
624 int i;
625 address &= TARGET_PAGE_MASK;
99773bd4
PB
626 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
627 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
628 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
629 address >= tb->pc + tb->size)) {
630 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 631 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
632 }
633 }
634 }
635}
636
637/* verify that all the pages have correct rights for code */
638static void tb_page_check(void)
639{
640 TranslationBlock *tb;
641 int i, flags1, flags2;
3b46e624 642
99773bd4
PB
643 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
644 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
645 flags1 = page_get_flags(tb->pc);
646 flags2 = page_get_flags(tb->pc + tb->size - 1);
647 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
648 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 649 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
650 }
651 }
652 }
653}
654
bdaf78e0 655static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
656{
657 TranslationBlock *tb1;
658 unsigned int n1;
659
660 /* suppress any remaining jumps to this TB */
661 tb1 = tb->jmp_first;
662 for(;;) {
663 n1 = (long)tb1 & 3;
664 tb1 = (TranslationBlock *)((long)tb1 & ~3);
665 if (n1 == 2)
666 break;
667 tb1 = tb1->jmp_next[n1];
668 }
669 /* check end of list */
670 if (tb1 != tb) {
671 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
672 }
673}
674
fd6ce8f6
FB
675#endif
676
677/* invalidate one TB */
678static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
679 int next_offset)
680{
681 TranslationBlock *tb1;
682 for(;;) {
683 tb1 = *ptb;
684 if (tb1 == tb) {
685 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
686 break;
687 }
688 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
689 }
690}
691
9fa3e853
FB
692static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
693{
694 TranslationBlock *tb1;
695 unsigned int n1;
696
697 for(;;) {
698 tb1 = *ptb;
699 n1 = (long)tb1 & 3;
700 tb1 = (TranslationBlock *)((long)tb1 & ~3);
701 if (tb1 == tb) {
702 *ptb = tb1->page_next[n1];
703 break;
704 }
705 ptb = &tb1->page_next[n1];
706 }
707}
708
d4e8164f
FB
709static inline void tb_jmp_remove(TranslationBlock *tb, int n)
710{
711 TranslationBlock *tb1, **ptb;
712 unsigned int n1;
713
714 ptb = &tb->jmp_next[n];
715 tb1 = *ptb;
716 if (tb1) {
717 /* find tb(n) in circular list */
718 for(;;) {
719 tb1 = *ptb;
720 n1 = (long)tb1 & 3;
721 tb1 = (TranslationBlock *)((long)tb1 & ~3);
722 if (n1 == n && tb1 == tb)
723 break;
724 if (n1 == 2) {
725 ptb = &tb1->jmp_first;
726 } else {
727 ptb = &tb1->jmp_next[n1];
728 }
729 }
730 /* now we can suppress tb(n) from the list */
731 *ptb = tb->jmp_next[n];
732
733 tb->jmp_next[n] = NULL;
734 }
735}
736
737/* reset the jump entry 'n' of a TB so that it is not chained to
738 another TB */
739static inline void tb_reset_jump(TranslationBlock *tb, int n)
740{
741 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
742}
743
2e70f6ef 744void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 745{
6a00d601 746 CPUState *env;
8a40a180 747 PageDesc *p;
d4e8164f 748 unsigned int h, n1;
00f82b8a 749 target_phys_addr_t phys_pc;
8a40a180 750 TranslationBlock *tb1, *tb2;
3b46e624 751
8a40a180
FB
752 /* remove the TB from the hash list */
753 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
754 h = tb_phys_hash_func(phys_pc);
5fafdf24 755 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
756 offsetof(TranslationBlock, phys_hash_next));
757
758 /* remove the TB from the page list */
759 if (tb->page_addr[0] != page_addr) {
760 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
761 tb_page_remove(&p->first_tb, tb);
762 invalidate_page_bitmap(p);
763 }
764 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
765 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
766 tb_page_remove(&p->first_tb, tb);
767 invalidate_page_bitmap(p);
768 }
769
36bdbe54 770 tb_invalidated_flag = 1;
59817ccb 771
fd6ce8f6 772 /* remove the TB from the hash list */
8a40a180 773 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
774 for(env = first_cpu; env != NULL; env = env->next_cpu) {
775 if (env->tb_jmp_cache[h] == tb)
776 env->tb_jmp_cache[h] = NULL;
777 }
d4e8164f
FB
778
779 /* suppress this TB from the two jump lists */
780 tb_jmp_remove(tb, 0);
781 tb_jmp_remove(tb, 1);
782
783 /* suppress any remaining jumps to this TB */
784 tb1 = tb->jmp_first;
785 for(;;) {
786 n1 = (long)tb1 & 3;
787 if (n1 == 2)
788 break;
789 tb1 = (TranslationBlock *)((long)tb1 & ~3);
790 tb2 = tb1->jmp_next[n1];
791 tb_reset_jump(tb1, n1);
792 tb1->jmp_next[n1] = NULL;
793 tb1 = tb2;
794 }
795 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 796
e3db7226 797 tb_phys_invalidate_count++;
9fa3e853
FB
798}
799
800static inline void set_bits(uint8_t *tab, int start, int len)
801{
802 int end, mask, end1;
803
804 end = start + len;
805 tab += start >> 3;
806 mask = 0xff << (start & 7);
807 if ((start & ~7) == (end & ~7)) {
808 if (start < end) {
809 mask &= ~(0xff << (end & 7));
810 *tab |= mask;
811 }
812 } else {
813 *tab++ |= mask;
814 start = (start + 8) & ~7;
815 end1 = end & ~7;
816 while (start < end1) {
817 *tab++ = 0xff;
818 start += 8;
819 }
820 if (start < end) {
821 mask = ~(0xff << (end & 7));
822 *tab |= mask;
823 }
824 }
825}
826
827static void build_page_bitmap(PageDesc *p)
828{
829 int n, tb_start, tb_end;
830 TranslationBlock *tb;
3b46e624 831
b2a7081a 832 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
833
834 tb = p->first_tb;
835 while (tb != NULL) {
836 n = (long)tb & 3;
837 tb = (TranslationBlock *)((long)tb & ~3);
838 /* NOTE: this is subtle as a TB may span two physical pages */
839 if (n == 0) {
840 /* NOTE: tb_end may be after the end of the page, but
841 it is not a problem */
842 tb_start = tb->pc & ~TARGET_PAGE_MASK;
843 tb_end = tb_start + tb->size;
844 if (tb_end > TARGET_PAGE_SIZE)
845 tb_end = TARGET_PAGE_SIZE;
846 } else {
847 tb_start = 0;
848 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
849 }
850 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
851 tb = tb->page_next[n];
852 }
853}
854
2e70f6ef
PB
855TranslationBlock *tb_gen_code(CPUState *env,
856 target_ulong pc, target_ulong cs_base,
857 int flags, int cflags)
d720b93d
FB
858{
859 TranslationBlock *tb;
860 uint8_t *tc_ptr;
861 target_ulong phys_pc, phys_page2, virt_page2;
862 int code_gen_size;
863
c27004ec
FB
864 phys_pc = get_phys_addr_code(env, pc);
865 tb = tb_alloc(pc);
d720b93d
FB
866 if (!tb) {
867 /* flush must be done */
868 tb_flush(env);
869 /* cannot fail at this point */
c27004ec 870 tb = tb_alloc(pc);
2e70f6ef
PB
871 /* Don't forget to invalidate previous TB info. */
872 tb_invalidated_flag = 1;
d720b93d
FB
873 }
874 tc_ptr = code_gen_ptr;
875 tb->tc_ptr = tc_ptr;
876 tb->cs_base = cs_base;
877 tb->flags = flags;
878 tb->cflags = cflags;
d07bde88 879 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 880 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 881
d720b93d 882 /* check next page if needed */
c27004ec 883 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 884 phys_page2 = -1;
c27004ec 885 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
886 phys_page2 = get_phys_addr_code(env, virt_page2);
887 }
888 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 889 return tb;
d720b93d 890}
3b46e624 891
9fa3e853
FB
892/* invalidate all TBs which intersect with the target physical page
893 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
894 the same physical page. 'is_cpu_write_access' should be true if called
895 from a real cpu write access: the virtual CPU will exit the current
896 TB if code is modified inside this TB. */
00f82b8a 897void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
898 int is_cpu_write_access)
899{
6b917547 900 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 901 CPUState *env = cpu_single_env;
9fa3e853 902 target_ulong tb_start, tb_end;
6b917547
AL
903 PageDesc *p;
904 int n;
905#ifdef TARGET_HAS_PRECISE_SMC
906 int current_tb_not_found = is_cpu_write_access;
907 TranslationBlock *current_tb = NULL;
908 int current_tb_modified = 0;
909 target_ulong current_pc = 0;
910 target_ulong current_cs_base = 0;
911 int current_flags = 0;
912#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
913
914 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 915 if (!p)
9fa3e853 916 return;
5fafdf24 917 if (!p->code_bitmap &&
d720b93d
FB
918 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
919 is_cpu_write_access) {
9fa3e853
FB
920 /* build code bitmap */
921 build_page_bitmap(p);
922 }
923
924 /* we remove all the TBs in the range [start, end[ */
925 /* XXX: see if in some cases it could be faster to invalidate all the code */
926 tb = p->first_tb;
927 while (tb != NULL) {
928 n = (long)tb & 3;
929 tb = (TranslationBlock *)((long)tb & ~3);
930 tb_next = tb->page_next[n];
931 /* NOTE: this is subtle as a TB may span two physical pages */
932 if (n == 0) {
933 /* NOTE: tb_end may be after the end of the page, but
934 it is not a problem */
935 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
936 tb_end = tb_start + tb->size;
937 } else {
938 tb_start = tb->page_addr[1];
939 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
940 }
941 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
942#ifdef TARGET_HAS_PRECISE_SMC
943 if (current_tb_not_found) {
944 current_tb_not_found = 0;
945 current_tb = NULL;
2e70f6ef 946 if (env->mem_io_pc) {
d720b93d 947 /* now we have a real cpu fault */
2e70f6ef 948 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
949 }
950 }
951 if (current_tb == tb &&
2e70f6ef 952 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
953 /* If we are modifying the current TB, we must stop
954 its execution. We could be more precise by checking
955 that the modification is after the current PC, but it
956 would require a specialized function to partially
957 restore the CPU state */
3b46e624 958
d720b93d 959 current_tb_modified = 1;
5fafdf24 960 cpu_restore_state(current_tb, env,
2e70f6ef 961 env->mem_io_pc, NULL);
6b917547
AL
962 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
963 &current_flags);
d720b93d
FB
964 }
965#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
966 /* we need to do that to handle the case where a signal
967 occurs while doing tb_phys_invalidate() */
968 saved_tb = NULL;
969 if (env) {
970 saved_tb = env->current_tb;
971 env->current_tb = NULL;
972 }
9fa3e853 973 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
974 if (env) {
975 env->current_tb = saved_tb;
976 if (env->interrupt_request && env->current_tb)
977 cpu_interrupt(env, env->interrupt_request);
978 }
9fa3e853
FB
979 }
980 tb = tb_next;
981 }
982#if !defined(CONFIG_USER_ONLY)
983 /* if no code remaining, no need to continue to use slow writes */
984 if (!p->first_tb) {
985 invalidate_page_bitmap(p);
d720b93d 986 if (is_cpu_write_access) {
2e70f6ef 987 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
988 }
989 }
990#endif
991#ifdef TARGET_HAS_PRECISE_SMC
992 if (current_tb_modified) {
993 /* we generate a block containing just the instruction
994 modifying the memory. It will ensure that it cannot modify
995 itself */
ea1c1802 996 env->current_tb = NULL;
2e70f6ef 997 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 998 cpu_resume_from_signal(env, NULL);
9fa3e853 999 }
fd6ce8f6 1000#endif
9fa3e853 1001}
fd6ce8f6 1002
9fa3e853 1003/* len must be <= 8 and start must be a multiple of len */
00f82b8a 1004static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
1005{
1006 PageDesc *p;
1007 int offset, b;
59817ccb 1008#if 0
a4193c8a 1009 if (1) {
93fcfe39
AL
1010 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1011 cpu_single_env->mem_io_vaddr, len,
1012 cpu_single_env->eip,
1013 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1014 }
1015#endif
9fa3e853 1016 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1017 if (!p)
9fa3e853
FB
1018 return;
1019 if (p->code_bitmap) {
1020 offset = start & ~TARGET_PAGE_MASK;
1021 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1022 if (b & ((1 << len) - 1))
1023 goto do_invalidate;
1024 } else {
1025 do_invalidate:
d720b93d 1026 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1027 }
1028}
1029
9fa3e853 1030#if !defined(CONFIG_SOFTMMU)
00f82b8a 1031static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1032 unsigned long pc, void *puc)
9fa3e853 1033{
6b917547 1034 TranslationBlock *tb;
9fa3e853 1035 PageDesc *p;
6b917547 1036 int n;
d720b93d 1037#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1038 TranslationBlock *current_tb = NULL;
d720b93d 1039 CPUState *env = cpu_single_env;
6b917547
AL
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
d720b93d 1044#endif
9fa3e853
FB
1045
1046 addr &= TARGET_PAGE_MASK;
1047 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1048 if (!p)
9fa3e853
FB
1049 return;
1050 tb = p->first_tb;
d720b93d
FB
1051#ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb && pc != 0) {
1053 current_tb = tb_find_pc(pc);
1054 }
1055#endif
9fa3e853
FB
1056 while (tb != NULL) {
1057 n = (long)tb & 3;
1058 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1059#ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb == tb &&
2e70f6ef 1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
3b46e624 1067
d720b93d
FB
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1070 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 &current_flags);
d720b93d
FB
1072 }
1073#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1074 tb_phys_invalidate(tb, addr);
1075 tb = tb->page_next[n];
1076 }
fd6ce8f6 1077 p->first_tb = NULL;
d720b93d
FB
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1082 itself */
ea1c1802 1083 env->current_tb = NULL;
2e70f6ef 1084 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1085 cpu_resume_from_signal(env, puc);
1086 }
1087#endif
fd6ce8f6 1088}
9fa3e853 1089#endif
fd6ce8f6
FB
1090
1091/* add the tb in the target page and protect it if necessary */
5fafdf24 1092static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1093 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1094{
1095 PageDesc *p;
9fa3e853
FB
1096 TranslationBlock *last_first_tb;
1097
1098 tb->page_addr[n] = page_addr;
3a7d929e 1099 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1100 tb->page_next[n] = p->first_tb;
1101 last_first_tb = p->first_tb;
1102 p->first_tb = (TranslationBlock *)((long)tb | n);
1103 invalidate_page_bitmap(p);
fd6ce8f6 1104
107db443 1105#if defined(TARGET_HAS_SMC) || 1
d720b93d 1106
9fa3e853 1107#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1108 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1109 target_ulong addr;
1110 PageDesc *p2;
9fa3e853
FB
1111 int prot;
1112
fd6ce8f6
FB
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
53a5960a 1115 page_addr &= qemu_host_page_mask;
fd6ce8f6 1116 prot = 0;
53a5960a
PB
1117 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1118 addr += TARGET_PAGE_SIZE) {
1119
1120 p2 = page_find (addr >> TARGET_PAGE_BITS);
1121 if (!p2)
1122 continue;
1123 prot |= p2->flags;
1124 p2->flags &= ~PAGE_WRITE;
1125 page_get_flags(addr);
1126 }
5fafdf24 1127 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1128 (prot & PAGE_BITS) & ~PAGE_WRITE);
1129#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1130 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1131 page_addr);
fd6ce8f6 1132#endif
fd6ce8f6 1133 }
9fa3e853
FB
1134#else
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb) {
6a00d601 1139 tlb_protect_code(page_addr);
9fa3e853
FB
1140 }
1141#endif
d720b93d
FB
1142
1143#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1144}
1145
1146/* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
c27004ec 1148TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1149{
1150 TranslationBlock *tb;
fd6ce8f6 1151
26a5f13b
FB
1152 if (nb_tbs >= code_gen_max_blocks ||
1153 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1154 return NULL;
fd6ce8f6
FB
1155 tb = &tbs[nb_tbs++];
1156 tb->pc = pc;
b448f2f3 1157 tb->cflags = 0;
d4e8164f
FB
1158 return tb;
1159}
1160
2e70f6ef
PB
1161void tb_free(TranslationBlock *tb)
1162{
bf20dc07 1163 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1167 code_gen_ptr = tb->tc_ptr;
1168 nb_tbs--;
1169 }
1170}
1171
9fa3e853
FB
1172/* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
5fafdf24 1174void tb_link_phys(TranslationBlock *tb,
9fa3e853 1175 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1176{
9fa3e853
FB
1177 unsigned int h;
1178 TranslationBlock **ptb;
1179
c8a706fe
PB
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1182 mmap_lock();
9fa3e853
FB
1183 /* add in the physical hash table */
1184 h = tb_phys_hash_func(phys_pc);
1185 ptb = &tb_phys_hash[h];
1186 tb->phys_hash_next = *ptb;
1187 *ptb = tb;
fd6ce8f6
FB
1188
1189 /* add in the page list */
9fa3e853
FB
1190 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1191 if (phys_page2 != -1)
1192 tb_alloc_page(tb, 1, phys_page2);
1193 else
1194 tb->page_addr[1] = -1;
9fa3e853 1195
d4e8164f
FB
1196 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1197 tb->jmp_next[0] = NULL;
1198 tb->jmp_next[1] = NULL;
1199
1200 /* init original jump addresses */
1201 if (tb->tb_next_offset[0] != 0xffff)
1202 tb_reset_jump(tb, 0);
1203 if (tb->tb_next_offset[1] != 0xffff)
1204 tb_reset_jump(tb, 1);
8a40a180
FB
1205
1206#ifdef DEBUG_TB_CHECK
1207 tb_page_check();
1208#endif
c8a706fe 1209 mmap_unlock();
fd6ce8f6
FB
1210}
1211
9fa3e853
FB
1212/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1215{
9fa3e853
FB
1216 int m_min, m_max, m;
1217 unsigned long v;
1218 TranslationBlock *tb;
a513fe19
FB
1219
1220 if (nb_tbs <= 0)
1221 return NULL;
1222 if (tc_ptr < (unsigned long)code_gen_buffer ||
1223 tc_ptr >= (unsigned long)code_gen_ptr)
1224 return NULL;
1225 /* binary search (cf Knuth) */
1226 m_min = 0;
1227 m_max = nb_tbs - 1;
1228 while (m_min <= m_max) {
1229 m = (m_min + m_max) >> 1;
1230 tb = &tbs[m];
1231 v = (unsigned long)tb->tc_ptr;
1232 if (v == tc_ptr)
1233 return tb;
1234 else if (tc_ptr < v) {
1235 m_max = m - 1;
1236 } else {
1237 m_min = m + 1;
1238 }
5fafdf24 1239 }
a513fe19
FB
1240 return &tbs[m_max];
1241}
7501267e 1242
ea041c0e
FB
1243static void tb_reset_jump_recursive(TranslationBlock *tb);
1244
1245static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1246{
1247 TranslationBlock *tb1, *tb_next, **ptb;
1248 unsigned int n1;
1249
1250 tb1 = tb->jmp_next[n];
1251 if (tb1 != NULL) {
1252 /* find head of list */
1253 for(;;) {
1254 n1 = (long)tb1 & 3;
1255 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 if (n1 == 2)
1257 break;
1258 tb1 = tb1->jmp_next[n1];
1259 }
1260 /* we are now sure now that tb jumps to tb1 */
1261 tb_next = tb1;
1262
1263 /* remove tb from the jmp_first list */
1264 ptb = &tb_next->jmp_first;
1265 for(;;) {
1266 tb1 = *ptb;
1267 n1 = (long)tb1 & 3;
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1269 if (n1 == n && tb1 == tb)
1270 break;
1271 ptb = &tb1->jmp_next[n1];
1272 }
1273 *ptb = tb->jmp_next[n];
1274 tb->jmp_next[n] = NULL;
3b46e624 1275
ea041c0e
FB
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb, n);
1278
0124311e 1279 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1280 tb_reset_jump_recursive(tb_next);
1281 }
1282}
1283
1284static void tb_reset_jump_recursive(TranslationBlock *tb)
1285{
1286 tb_reset_jump_recursive2(tb, 0);
1287 tb_reset_jump_recursive2(tb, 1);
1288}
1289
1fddef4b 1290#if defined(TARGET_HAS_ICE)
d720b93d
FB
1291static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1292{
9b3c35e0
JM
1293 target_phys_addr_t addr;
1294 target_ulong pd;
c2f07f81
PB
1295 ram_addr_t ram_addr;
1296 PhysPageDesc *p;
d720b93d 1297
c2f07f81
PB
1298 addr = cpu_get_phys_page_debug(env, pc);
1299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1300 if (!p) {
1301 pd = IO_MEM_UNASSIGNED;
1302 } else {
1303 pd = p->phys_offset;
1304 }
1305 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1306 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1307}
c27004ec 1308#endif
d720b93d 1309
6658ffb8 1310/* Add a watchpoint. */
a1d1bb31
AL
1311int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1312 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1313{
b4051334 1314 target_ulong len_mask = ~(len - 1);
c0ce998e 1315 CPUWatchpoint *wp;
6658ffb8 1316
b4051334
AL
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1319 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 return -EINVAL;
1322 }
a1d1bb31 1323 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1324
1325 wp->vaddr = addr;
b4051334 1326 wp->len_mask = len_mask;
a1d1bb31
AL
1327 wp->flags = flags;
1328
2dc9f411 1329 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1330 if (flags & BP_GDB)
1331 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1332 else
1333 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1334
6658ffb8 1335 tlb_flush_page(env, addr);
a1d1bb31
AL
1336
1337 if (watchpoint)
1338 *watchpoint = wp;
1339 return 0;
6658ffb8
PB
1340}
1341
a1d1bb31
AL
1342/* Remove a specific watchpoint. */
1343int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1344 int flags)
6658ffb8 1345{
b4051334 1346 target_ulong len_mask = ~(len - 1);
a1d1bb31 1347 CPUWatchpoint *wp;
6658ffb8 1348
c0ce998e 1349 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1350 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1351 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1352 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1353 return 0;
1354 }
1355 }
a1d1bb31 1356 return -ENOENT;
6658ffb8
PB
1357}
1358
a1d1bb31
AL
1359/* Remove a specific watchpoint by reference. */
1360void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1361{
c0ce998e 1362 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1363
a1d1bb31
AL
1364 tlb_flush_page(env, watchpoint->vaddr);
1365
1366 qemu_free(watchpoint);
1367}
1368
1369/* Remove all matching watchpoints. */
1370void cpu_watchpoint_remove_all(CPUState *env, int mask)
1371{
c0ce998e 1372 CPUWatchpoint *wp, *next;
a1d1bb31 1373
c0ce998e 1374 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1375 if (wp->flags & mask)
1376 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1377 }
7d03f82f
EI
1378}
1379
a1d1bb31
AL
1380/* Add a breakpoint. */
1381int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1382 CPUBreakpoint **breakpoint)
4c3a88a2 1383{
1fddef4b 1384#if defined(TARGET_HAS_ICE)
c0ce998e 1385 CPUBreakpoint *bp;
3b46e624 1386
a1d1bb31 1387 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1388
a1d1bb31
AL
1389 bp->pc = pc;
1390 bp->flags = flags;
1391
2dc9f411 1392 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1393 if (flags & BP_GDB)
1394 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1395 else
1396 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1397
d720b93d 1398 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1399
1400 if (breakpoint)
1401 *breakpoint = bp;
4c3a88a2
FB
1402 return 0;
1403#else
a1d1bb31 1404 return -ENOSYS;
4c3a88a2
FB
1405#endif
1406}
1407
a1d1bb31
AL
1408/* Remove a specific breakpoint. */
1409int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1410{
7d03f82f 1411#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1412 CPUBreakpoint *bp;
1413
c0ce998e 1414 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1415 if (bp->pc == pc && bp->flags == flags) {
1416 cpu_breakpoint_remove_by_ref(env, bp);
1417 return 0;
1418 }
7d03f82f 1419 }
a1d1bb31
AL
1420 return -ENOENT;
1421#else
1422 return -ENOSYS;
7d03f82f
EI
1423#endif
1424}
1425
a1d1bb31
AL
1426/* Remove a specific breakpoint by reference. */
1427void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1428{
1fddef4b 1429#if defined(TARGET_HAS_ICE)
c0ce998e 1430 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1431
a1d1bb31
AL
1432 breakpoint_invalidate(env, breakpoint->pc);
1433
1434 qemu_free(breakpoint);
1435#endif
1436}
1437
1438/* Remove all matching breakpoints. */
1439void cpu_breakpoint_remove_all(CPUState *env, int mask)
1440{
1441#if defined(TARGET_HAS_ICE)
c0ce998e 1442 CPUBreakpoint *bp, *next;
a1d1bb31 1443
c0ce998e 1444 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1445 if (bp->flags & mask)
1446 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1447 }
4c3a88a2
FB
1448#endif
1449}
1450
c33a346e
FB
1451/* enable or disable single step mode. EXCP_DEBUG is returned by the
1452 CPU loop after each instruction */
1453void cpu_single_step(CPUState *env, int enabled)
1454{
1fddef4b 1455#if defined(TARGET_HAS_ICE)
c33a346e
FB
1456 if (env->singlestep_enabled != enabled) {
1457 env->singlestep_enabled = enabled;
1458 /* must flush all the translated code to avoid inconsistancies */
9fa3e853 1459 /* XXX: only flush what is necessary */
0124311e 1460 tb_flush(env);
c33a346e
FB
1461 }
1462#endif
1463}
1464
34865134
FB
1465/* enable or disable low levels log */
1466void cpu_set_log(int log_flags)
1467{
1468 loglevel = log_flags;
1469 if (loglevel && !logfile) {
11fcfab4 1470 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1471 if (!logfile) {
1472 perror(logfilename);
1473 _exit(1);
1474 }
9fa3e853
FB
1475#if !defined(CONFIG_SOFTMMU)
1476 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1477 {
b55266b5 1478 static char logfile_buf[4096];
9fa3e853
FB
1479 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1480 }
1481#else
34865134 1482 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1483#endif
e735b91c
PB
1484 log_append = 1;
1485 }
1486 if (!loglevel && logfile) {
1487 fclose(logfile);
1488 logfile = NULL;
34865134
FB
1489 }
1490}
1491
1492void cpu_set_log_filename(const char *filename)
1493{
1494 logfilename = strdup(filename);
e735b91c
PB
1495 if (logfile) {
1496 fclose(logfile);
1497 logfile = NULL;
1498 }
1499 cpu_set_log(loglevel);
34865134 1500}
c33a346e 1501
0124311e 1502/* mask must never be zero, except for A20 change call */
68a79315 1503void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1504{
d5975363 1505#if !defined(USE_NPTL)
ea041c0e 1506 TranslationBlock *tb;
15a51156 1507 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1508#endif
2e70f6ef 1509 int old_mask;
59817ccb 1510
be214e6c
AJ
1511 if (mask & CPU_INTERRUPT_EXIT) {
1512 env->exit_request = 1;
1513 mask &= ~CPU_INTERRUPT_EXIT;
1514 }
1515
2e70f6ef 1516 old_mask = env->interrupt_request;
68a79315 1517 env->interrupt_request |= mask;
d5975363
PB
1518#if defined(USE_NPTL)
1519 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1520 problem and hope the cpu will stop of its own accord. For userspace
1521 emulation this often isn't actually as bad as it sounds. Often
1522 signals are used primarily to interrupt blocking syscalls. */
1523#else
2e70f6ef 1524 if (use_icount) {
266910c4 1525 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1526#ifndef CONFIG_USER_ONLY
2e70f6ef 1527 if (!can_do_io(env)
be214e6c 1528 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1529 cpu_abort(env, "Raised interrupt while not in I/O function");
1530 }
1531#endif
1532 } else {
1533 tb = env->current_tb;
1534 /* if the cpu is currently executing code, we must unlink it and
1535 all the potentially executing TB */
1536 if (tb && !testandset(&interrupt_lock)) {
1537 env->current_tb = NULL;
1538 tb_reset_jump_recursive(tb);
1539 resetlock(&interrupt_lock);
1540 }
ea041c0e 1541 }
d5975363 1542#endif
ea041c0e
FB
1543}
1544
b54ad049
FB
1545void cpu_reset_interrupt(CPUState *env, int mask)
1546{
1547 env->interrupt_request &= ~mask;
1548}
1549
c7cd6a37 1550const CPULogItem cpu_log_items[] = {
5fafdf24 1551 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1552 "show generated host assembly code for each compiled TB" },
1553 { CPU_LOG_TB_IN_ASM, "in_asm",
1554 "show target assembly code for each compiled TB" },
5fafdf24 1555 { CPU_LOG_TB_OP, "op",
57fec1fe 1556 "show micro ops for each compiled TB" },
f193c797 1557 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1558 "show micro ops "
1559#ifdef TARGET_I386
1560 "before eflags optimization and "
f193c797 1561#endif
e01a1157 1562 "after liveness analysis" },
f193c797
FB
1563 { CPU_LOG_INT, "int",
1564 "show interrupts/exceptions in short format" },
1565 { CPU_LOG_EXEC, "exec",
1566 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1567 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1568 "show CPU state before block translation" },
f193c797
FB
1569#ifdef TARGET_I386
1570 { CPU_LOG_PCALL, "pcall",
1571 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1572 { CPU_LOG_RESET, "cpu_reset",
1573 "show CPU state before CPU resets" },
f193c797 1574#endif
8e3a9fd2 1575#ifdef DEBUG_IOPORT
fd872598
FB
1576 { CPU_LOG_IOPORT, "ioport",
1577 "show all i/o ports accesses" },
8e3a9fd2 1578#endif
f193c797
FB
1579 { 0, NULL, NULL },
1580};
1581
1582static int cmp1(const char *s1, int n, const char *s2)
1583{
1584 if (strlen(s2) != n)
1585 return 0;
1586 return memcmp(s1, s2, n) == 0;
1587}
3b46e624 1588
f193c797
FB
1589/* takes a comma separated list of log masks. Return 0 if error. */
1590int cpu_str_to_log_mask(const char *str)
1591{
c7cd6a37 1592 const CPULogItem *item;
f193c797
FB
1593 int mask;
1594 const char *p, *p1;
1595
1596 p = str;
1597 mask = 0;
1598 for(;;) {
1599 p1 = strchr(p, ',');
1600 if (!p1)
1601 p1 = p + strlen(p);
8e3a9fd2
FB
1602 if(cmp1(p,p1-p,"all")) {
1603 for(item = cpu_log_items; item->mask != 0; item++) {
1604 mask |= item->mask;
1605 }
1606 } else {
f193c797
FB
1607 for(item = cpu_log_items; item->mask != 0; item++) {
1608 if (cmp1(p, p1 - p, item->name))
1609 goto found;
1610 }
1611 return 0;
8e3a9fd2 1612 }
f193c797
FB
1613 found:
1614 mask |= item->mask;
1615 if (*p1 != ',')
1616 break;
1617 p = p1 + 1;
1618 }
1619 return mask;
1620}
ea041c0e 1621
7501267e
FB
1622void cpu_abort(CPUState *env, const char *fmt, ...)
1623{
1624 va_list ap;
493ae1f0 1625 va_list ap2;
7501267e
FB
1626
1627 va_start(ap, fmt);
493ae1f0 1628 va_copy(ap2, ap);
7501267e
FB
1629 fprintf(stderr, "qemu: fatal: ");
1630 vfprintf(stderr, fmt, ap);
1631 fprintf(stderr, "\n");
1632#ifdef TARGET_I386
7fe48483
FB
1633 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1634#else
1635 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1636#endif
93fcfe39
AL
1637 if (qemu_log_enabled()) {
1638 qemu_log("qemu: fatal: ");
1639 qemu_log_vprintf(fmt, ap2);
1640 qemu_log("\n");
f9373291 1641#ifdef TARGET_I386
93fcfe39 1642 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1643#else
93fcfe39 1644 log_cpu_state(env, 0);
f9373291 1645#endif
31b1a7b4 1646 qemu_log_flush();
93fcfe39 1647 qemu_log_close();
924edcae 1648 }
493ae1f0 1649 va_end(ap2);
f9373291 1650 va_end(ap);
7501267e
FB
1651 abort();
1652}
1653
c5be9f08
TS
1654CPUState *cpu_copy(CPUState *env)
1655{
01ba9816 1656 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1657 CPUState *next_cpu = new_env->next_cpu;
1658 int cpu_index = new_env->cpu_index;
5a38f081
AL
1659#if defined(TARGET_HAS_ICE)
1660 CPUBreakpoint *bp;
1661 CPUWatchpoint *wp;
1662#endif
1663
c5be9f08 1664 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1665
1666 /* Preserve chaining and index. */
c5be9f08
TS
1667 new_env->next_cpu = next_cpu;
1668 new_env->cpu_index = cpu_index;
5a38f081
AL
1669
1670 /* Clone all break/watchpoints.
1671 Note: Once we support ptrace with hw-debug register access, make sure
1672 BP_CPU break/watchpoints are handled correctly on clone. */
1673 TAILQ_INIT(&env->breakpoints);
1674 TAILQ_INIT(&env->watchpoints);
1675#if defined(TARGET_HAS_ICE)
1676 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1677 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1678 }
1679 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1680 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1681 wp->flags, NULL);
1682 }
1683#endif
1684
c5be9f08
TS
1685 return new_env;
1686}
1687
0124311e
FB
1688#if !defined(CONFIG_USER_ONLY)
1689
5c751e99
EI
1690static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1691{
1692 unsigned int i;
1693
1694 /* Discard jump cache entries for any tb which might potentially
1695 overlap the flushed page. */
1696 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1697 memset (&env->tb_jmp_cache[i], 0,
1698 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1699
1700 i = tb_jmp_cache_hash_page(addr);
1701 memset (&env->tb_jmp_cache[i], 0,
1702 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1703}
1704
ee8b7021
FB
1705/* NOTE: if flush_global is true, also flush global entries (not
1706 implemented yet) */
1707void tlb_flush(CPUState *env, int flush_global)
33417e70 1708{
33417e70 1709 int i;
0124311e 1710
9fa3e853
FB
1711#if defined(DEBUG_TLB)
1712 printf("tlb_flush:\n");
1713#endif
0124311e
FB
1714 /* must reset current TB so that interrupts cannot modify the
1715 links while we are modifying them */
1716 env->current_tb = NULL;
1717
33417e70 1718 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1719 env->tlb_table[0][i].addr_read = -1;
1720 env->tlb_table[0][i].addr_write = -1;
1721 env->tlb_table[0][i].addr_code = -1;
1722 env->tlb_table[1][i].addr_read = -1;
1723 env->tlb_table[1][i].addr_write = -1;
1724 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1725#if (NB_MMU_MODES >= 3)
1726 env->tlb_table[2][i].addr_read = -1;
1727 env->tlb_table[2][i].addr_write = -1;
1728 env->tlb_table[2][i].addr_code = -1;
1729#if (NB_MMU_MODES == 4)
1730 env->tlb_table[3][i].addr_read = -1;
1731 env->tlb_table[3][i].addr_write = -1;
1732 env->tlb_table[3][i].addr_code = -1;
1733#endif
1734#endif
33417e70 1735 }
9fa3e853 1736
8a40a180 1737 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1738
0a962c02
FB
1739#ifdef USE_KQEMU
1740 if (env->kqemu_enabled) {
1741 kqemu_flush(env, flush_global);
1742 }
9fa3e853 1743#endif
e3db7226 1744 tlb_flush_count++;
33417e70
FB
1745}
1746
274da6b2 1747static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1748{
5fafdf24 1749 if (addr == (tlb_entry->addr_read &
84b7b8e7 1750 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1751 addr == (tlb_entry->addr_write &
84b7b8e7 1752 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1753 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1754 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1755 tlb_entry->addr_read = -1;
1756 tlb_entry->addr_write = -1;
1757 tlb_entry->addr_code = -1;
1758 }
61382a50
FB
1759}
1760
2e12669a 1761void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1762{
8a40a180 1763 int i;
0124311e 1764
9fa3e853 1765#if defined(DEBUG_TLB)
108c49b8 1766 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1767#endif
0124311e
FB
1768 /* must reset current TB so that interrupts cannot modify the
1769 links while we are modifying them */
1770 env->current_tb = NULL;
61382a50
FB
1771
1772 addr &= TARGET_PAGE_MASK;
1773 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1774 tlb_flush_entry(&env->tlb_table[0][i], addr);
1775 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1776#if (NB_MMU_MODES >= 3)
1777 tlb_flush_entry(&env->tlb_table[2][i], addr);
1778#if (NB_MMU_MODES == 4)
1779 tlb_flush_entry(&env->tlb_table[3][i], addr);
1780#endif
1781#endif
0124311e 1782
5c751e99 1783 tlb_flush_jmp_cache(env, addr);
9fa3e853 1784
0a962c02
FB
1785#ifdef USE_KQEMU
1786 if (env->kqemu_enabled) {
1787 kqemu_flush_page(env, addr);
1788 }
1789#endif
9fa3e853
FB
1790}
1791
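/* Illustrative sketch (not part of exec.c): the per-target MMU helpers
 * are the usual callers of the two flush primitives above.  After the
 * guest rewrites a single PTE only that page needs to go; after a full
 * page-table switch on a target without ASIDs the whole TLB is dropped.
 * The helper names are hypothetical. */
static void example_pte_rewritten(CPUState *env, target_ulong vaddr)
{
    tlb_flush_page(env, vaddr);     /* invalidate one cached mapping */
}

static void example_page_tables_switched(CPUState *env)
{
    tlb_flush(env, 1);              /* drop every cached mapping */
}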
9fa3e853
FB
1792/* update the TLBs so that writes to code in the virtual page 'addr'
1793 can be detected */
6a00d601 1794static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1795{
5fafdf24 1796 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1797 ram_addr + TARGET_PAGE_SIZE,
1798 CODE_DIRTY_FLAG);
9fa3e853
FB
1799}
1800
9fa3e853 1801/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1802 tested for self modifying code */
5fafdf24 1803static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1804 target_ulong vaddr)
9fa3e853 1805{
3a7d929e 1806 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1807}
1808
5fafdf24 1809static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1810 unsigned long start, unsigned long length)
1811{
1812 unsigned long addr;
84b7b8e7
FB
1813 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1814 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1815 if ((addr - start) < length) {
0f459d16 1816 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1817 }
1818 }
1819}
1820
3a7d929e 1821void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1822 int dirty_flags)
1ccde1cb
FB
1823{
1824 CPUState *env;
4f2ac237 1825 unsigned long length, start1;
0a962c02
FB
1826 int i, mask, len;
1827 uint8_t *p;
1ccde1cb
FB
1828
1829 start &= TARGET_PAGE_MASK;
1830 end = TARGET_PAGE_ALIGN(end);
1831
1832 length = end - start;
1833 if (length == 0)
1834 return;
0a962c02 1835 len = length >> TARGET_PAGE_BITS;
3a7d929e 1836#ifdef USE_KQEMU
6a00d601
FB
1837 /* XXX: should not depend on cpu context */
1838 env = first_cpu;
3a7d929e 1839 if (env->kqemu_enabled) {
f23db169
FB
1840 ram_addr_t addr;
1841 addr = start;
1842 for(i = 0; i < len; i++) {
1843 kqemu_set_notdirty(env, addr);
1844 addr += TARGET_PAGE_SIZE;
1845 }
3a7d929e
FB
1846 }
1847#endif
f23db169
FB
1848 mask = ~dirty_flags;
1849 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1850 for(i = 0; i < len; i++)
1851 p[i] &= mask;
1852
1ccde1cb
FB
1853 /* we modify the TLB cache so that the dirty bit will be set again
1854 when accessing the range */
59817ccb 1855 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1856 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1857 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1858 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1859 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1860 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1861#if (NB_MMU_MODES >= 3)
1862 for(i = 0; i < CPU_TLB_SIZE; i++)
1863 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1864#if (NB_MMU_MODES == 4)
1865 for(i = 0; i < CPU_TLB_SIZE; i++)
1866 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1867#endif
1868#endif
6a00d601 1869 }
1ccde1cb
FB
1870}
1871
74576198
AL
1872int cpu_physical_memory_set_dirty_tracking(int enable)
1873{
1874 in_migration = enable;
1875 return 0;
1876}
1877
1878int cpu_physical_memory_get_dirty_tracking(void)
1879{
1880 return in_migration;
1881}
1882
2bec46dc
AL
1883void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1884{
1885 if (kvm_enabled())
1886 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1887}
1888
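/* Illustrative sketch (not part of exec.c): a live-migration RAM pass
 * built on the dirty-tracking hooks above.  It assumes this RAM block
 * is mapped 1:1 at the same guest-physical addresses, and that
 * cpu_physical_memory_get_dirty() and MIGRATION_DIRTY_FLAG are the
 * cpu-all.h helpers; send_page() is hypothetical. */
static void example_migrate_dirty_ram(ram_addr_t start, ram_addr_t end,
                                      void (*send_page)(ram_addr_t addr))
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);
    /* pull the accelerator's (e.g. KVM's) write log into phys_ram_dirty */
    cpu_physical_sync_dirty_bitmap(start, end);

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            send_page(addr);
            /* clear only the migration bit; code/VGA tracking is kept */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}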
3a7d929e
FB
1889static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1890{
1891 ram_addr_t ram_addr;
1892
84b7b8e7 1893 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1894 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1895 tlb_entry->addend - (unsigned long)phys_ram_base;
1896 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1897 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1898 }
1899 }
1900}
1901
1902/* update the TLB according to the current state of the dirty bits */
1903void cpu_tlb_update_dirty(CPUState *env)
1904{
1905 int i;
1906 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1907 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1908 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1909 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1910#if (NB_MMU_MODES >= 3)
1911 for(i = 0; i < CPU_TLB_SIZE; i++)
1912 tlb_update_dirty(&env->tlb_table[2][i]);
1913#if (NB_MMU_MODES == 4)
1914 for(i = 0; i < CPU_TLB_SIZE; i++)
1915 tlb_update_dirty(&env->tlb_table[3][i]);
1916#endif
1917#endif
3a7d929e
FB
1918}
1919
0f459d16 1920static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1921{
0f459d16
PB
1922 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1923 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1924}
1925
0f459d16
PB
1926/* update the TLB corresponding to virtual page vaddr
1927 so that it is no longer dirty */
1928static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1929{
1ccde1cb
FB
1930 int i;
1931
0f459d16 1932 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1933 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1934 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1935 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1936#if (NB_MMU_MODES >= 3)
0f459d16 1937 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1938#if (NB_MMU_MODES == 4)
0f459d16 1939 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1940#endif
1941#endif
9fa3e853
FB
1942}
1943
59817ccb
FB
1944/* add a new TLB entry. At most one entry for a given virtual address
1945 is permitted. Return 0 if OK or 2 if the page could not be mapped
1946 (can only happen in non SOFTMMU mode for I/O pages or pages
1947 conflicting with the host address space). */
5fafdf24
TS
1948int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1949 target_phys_addr_t paddr, int prot,
6ebbf390 1950 int mmu_idx, int is_softmmu)
9fa3e853 1951{
92e873b9 1952 PhysPageDesc *p;
4f2ac237 1953 unsigned long pd;
9fa3e853 1954 unsigned int index;
4f2ac237 1955 target_ulong address;
0f459d16 1956 target_ulong code_address;
108c49b8 1957 target_phys_addr_t addend;
9fa3e853 1958 int ret;
84b7b8e7 1959 CPUTLBEntry *te;
a1d1bb31 1960 CPUWatchpoint *wp;
0f459d16 1961 target_phys_addr_t iotlb;
9fa3e853 1962
92e873b9 1963 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1964 if (!p) {
1965 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1966 } else {
1967 pd = p->phys_offset;
9fa3e853
FB
1968 }
1969#if defined(DEBUG_TLB)
6ebbf390
JM
1970 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1971 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1972#endif
1973
1974 ret = 0;
0f459d16
PB
1975 address = vaddr;
1976 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1977 /* IO memory case (romd handled later) */
1978 address |= TLB_MMIO;
1979 }
1980 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1981 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1982 /* Normal RAM. */
1983 iotlb = pd & TARGET_PAGE_MASK;
1984 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1985 iotlb |= IO_MEM_NOTDIRTY;
1986 else
1987 iotlb |= IO_MEM_ROM;
1988 } else {
 1989 /* IO handlers are currently passed a physical address.
1990 It would be nice to pass an offset from the base address
1991 of that region. This would avoid having to special case RAM,
1992 and avoid full address decoding in every device.
1993 We can't use the high bits of pd for this because
1994 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1995 iotlb = (pd & ~TARGET_PAGE_MASK);
1996 if (p) {
8da3ff18
PB
1997 iotlb += p->region_offset;
1998 } else {
1999 iotlb += paddr;
2000 }
0f459d16
PB
2001 }
2002
2003 code_address = address;
2004 /* Make accesses to pages with watchpoints go via the
2005 watchpoint trap routines. */
c0ce998e 2006 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2007 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2008 iotlb = io_mem_watch + paddr;
2009 /* TODO: The memory case can be optimized by not trapping
2010 reads of pages with a write breakpoint. */
2011 address |= TLB_MMIO;
6658ffb8 2012 }
0f459d16 2013 }
d79acba4 2014
0f459d16
PB
2015 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2016 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2017 te = &env->tlb_table[mmu_idx][index];
2018 te->addend = addend - vaddr;
2019 if (prot & PAGE_READ) {
2020 te->addr_read = address;
2021 } else {
2022 te->addr_read = -1;
2023 }
5c751e99 2024
0f459d16
PB
2025 if (prot & PAGE_EXEC) {
2026 te->addr_code = code_address;
2027 } else {
2028 te->addr_code = -1;
2029 }
2030 if (prot & PAGE_WRITE) {
2031 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2032 (pd & IO_MEM_ROMD)) {
2033 /* Write access calls the I/O callback. */
2034 te->addr_write = address | TLB_MMIO;
2035 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2036 !cpu_physical_memory_is_dirty(pd)) {
2037 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2038 } else {
0f459d16 2039 te->addr_write = address;
9fa3e853 2040 }
0f459d16
PB
2041 } else {
2042 te->addr_write = -1;
9fa3e853 2043 }
9fa3e853
FB
2044 return ret;
2045}
2046
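/* Illustrative sketch (not part of exec.c): tlb_set_page_exec() is
 * normally reached from the per-target MMU fault handler, which walks
 * the guest page tables and then installs the translation so the next
 * access takes the fast path.  translate_guest_addr() and the 0/1
 * return convention of this wrapper are hypothetical. */
static int translate_guest_addr(CPUState *env, target_ulong vaddr,
                                target_phys_addr_t *paddr, int *prot);

static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    if (translate_guest_addr(env, vaddr, &paddr, &prot) < 0)
        return 1;           /* no mapping: the caller raises a guest fault */

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, 1 /* is_softmmu */);
}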
0124311e
FB
2047#else
2048
ee8b7021 2049void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2050{
2051}
2052
2e12669a 2053void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2054{
2055}
2056
5fafdf24
TS
2057int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2058 target_phys_addr_t paddr, int prot,
6ebbf390 2059 int mmu_idx, int is_softmmu)
9fa3e853
FB
2060{
2061 return 0;
2062}
0124311e 2063
9fa3e853
FB
2064/* dump memory mappings */
2065void page_dump(FILE *f)
33417e70 2066{
9fa3e853
FB
2067 unsigned long start, end;
2068 int i, j, prot, prot1;
2069 PageDesc *p;
33417e70 2070
9fa3e853
FB
2071 fprintf(f, "%-8s %-8s %-8s %s\n",
2072 "start", "end", "size", "prot");
2073 start = -1;
2074 end = -1;
2075 prot = 0;
2076 for(i = 0; i <= L1_SIZE; i++) {
2077 if (i < L1_SIZE)
2078 p = l1_map[i];
2079 else
2080 p = NULL;
2081 for(j = 0;j < L2_SIZE; j++) {
2082 if (!p)
2083 prot1 = 0;
2084 else
2085 prot1 = p[j].flags;
2086 if (prot1 != prot) {
2087 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2088 if (start != -1) {
2089 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2090 start, end, end - start,
9fa3e853
FB
2091 prot & PAGE_READ ? 'r' : '-',
2092 prot & PAGE_WRITE ? 'w' : '-',
2093 prot & PAGE_EXEC ? 'x' : '-');
2094 }
2095 if (prot1 != 0)
2096 start = end;
2097 else
2098 start = -1;
2099 prot = prot1;
2100 }
2101 if (!p)
2102 break;
2103 }
33417e70 2104 }
33417e70
FB
2105}
2106
53a5960a 2107int page_get_flags(target_ulong address)
33417e70 2108{
9fa3e853
FB
2109 PageDesc *p;
2110
2111 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2112 if (!p)
9fa3e853
FB
2113 return 0;
2114 return p->flags;
2115}
2116
2117/* modify the flags of a page and invalidate the code if
 2118 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2119 depending on PAGE_WRITE */
53a5960a 2120void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2121{
2122 PageDesc *p;
53a5960a 2123 target_ulong addr;
9fa3e853 2124
c8a706fe 2125 /* mmap_lock should already be held. */
9fa3e853
FB
2126 start = start & TARGET_PAGE_MASK;
2127 end = TARGET_PAGE_ALIGN(end);
2128 if (flags & PAGE_WRITE)
2129 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2130 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2131 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2132 /* We may be called for host regions that are outside guest
2133 address space. */
2134 if (!p)
2135 return;
9fa3e853
FB
2136 /* if the write protection is set, then we invalidate the code
2137 inside */
5fafdf24 2138 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2139 (flags & PAGE_WRITE) &&
2140 p->first_tb) {
d720b93d 2141 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2142 }
2143 p->flags = flags;
2144 }
33417e70
FB
2145}
2146
3d97b40b
TS
2147int page_check_range(target_ulong start, target_ulong len, int flags)
2148{
2149 PageDesc *p;
2150 target_ulong end;
2151 target_ulong addr;
2152
55f280c9
AZ
2153 if (start + len < start)
2154 /* we've wrapped around */
2155 return -1;
2156
3d97b40b
TS
 2157 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2158 start = start & TARGET_PAGE_MASK;
2159
3d97b40b
TS
2160 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2161 p = page_find(addr >> TARGET_PAGE_BITS);
2162 if( !p )
2163 return -1;
2164 if( !(p->flags & PAGE_VALID) )
2165 return -1;
2166
dae3270c 2167 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2168 return -1;
dae3270c
FB
2169 if (flags & PAGE_WRITE) {
2170 if (!(p->flags & PAGE_WRITE_ORG))
2171 return -1;
2172 /* unprotect the page if it was put read-only because it
2173 contains translated code */
2174 if (!(p->flags & PAGE_WRITE)) {
2175 if (!page_unprotect(addr, 0, NULL))
2176 return -1;
2177 }
2178 return 0;
2179 }
3d97b40b
TS
2180 }
2181 return 0;
2182}
2183
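/* Illustrative sketch (not part of exec.c): user-mode syscall emulation
 * normally validates a guest buffer with page_check_range() before
 * touching it, so an unmapped or read-only page becomes an EFAULT-style
 * error instead of a host crash.  The helper name is hypothetical. */
static int example_copy_from_guest(void *host_buf, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;                    /* caller turns this into EFAULT */
    memcpy(host_buf, g2h(guest_addr), len);
    return 0;
}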
9fa3e853
FB
2184/* called from signal handler: invalidate the code and unprotect the
 2185 page. Return TRUE if the fault was successfully handled. */
53a5960a 2186int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2187{
2188 unsigned int page_index, prot, pindex;
2189 PageDesc *p, *p1;
53a5960a 2190 target_ulong host_start, host_end, addr;
9fa3e853 2191
c8a706fe
PB
2192 /* Technically this isn't safe inside a signal handler. However we
2193 know this only ever happens in a synchronous SEGV handler, so in
2194 practice it seems to be ok. */
2195 mmap_lock();
2196
83fb7adf 2197 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2198 page_index = host_start >> TARGET_PAGE_BITS;
2199 p1 = page_find(page_index);
c8a706fe
PB
2200 if (!p1) {
2201 mmap_unlock();
9fa3e853 2202 return 0;
c8a706fe 2203 }
83fb7adf 2204 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2205 p = p1;
2206 prot = 0;
2207 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2208 prot |= p->flags;
2209 p++;
2210 }
2211 /* if the page was really writable, then we change its
2212 protection back to writable */
2213 if (prot & PAGE_WRITE_ORG) {
2214 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2215 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2216 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2217 (prot & PAGE_BITS) | PAGE_WRITE);
2218 p1[pindex].flags |= PAGE_WRITE;
2219 /* and since the content will be modified, we must invalidate
2220 the corresponding translated code. */
d720b93d 2221 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2222#ifdef DEBUG_TB_CHECK
2223 tb_invalidate_check(address);
2224#endif
c8a706fe 2225 mmap_unlock();
9fa3e853
FB
2226 return 1;
2227 }
2228 }
c8a706fe 2229 mmap_unlock();
9fa3e853
FB
2230 return 0;
2231}
2232
6a00d601
FB
2233static inline void tlb_set_dirty(CPUState *env,
2234 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2235{
2236}
9fa3e853
FB
2237#endif /* defined(CONFIG_USER_ONLY) */
2238
e2eef170 2239#if !defined(CONFIG_USER_ONLY)
8da3ff18 2240
db7b5426 2241static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2242 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2243static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2244 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2245#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2246 need_subpage) \
2247 do { \
2248 if (addr > start_addr) \
2249 start_addr2 = 0; \
2250 else { \
2251 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2252 if (start_addr2 > 0) \
2253 need_subpage = 1; \
2254 } \
2255 \
49e9fba2 2256 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2257 end_addr2 = TARGET_PAGE_SIZE - 1; \
2258 else { \
2259 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2260 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2261 need_subpage = 1; \
2262 } \
2263 } while (0)
2264
33417e70
FB
2265/* register physical memory. 'size' must be a multiple of the target
2266 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2267 io memory page. The address used when calling the IO function is
2268 the offset from the start of the region, plus region_offset. Both
 2269 start_addr and region_offset are rounded down to a page boundary
2270 before calculating this offset. This should not be a problem unless
2271 the low bits of start_addr and region_offset differ. */
2272void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2273 ram_addr_t size,
2274 ram_addr_t phys_offset,
2275 ram_addr_t region_offset)
33417e70 2276{
108c49b8 2277 target_phys_addr_t addr, end_addr;
92e873b9 2278 PhysPageDesc *p;
9d42037b 2279 CPUState *env;
00f82b8a 2280 ram_addr_t orig_size = size;
db7b5426 2281 void *subpage;
33417e70 2282
da260249
FB
2283#ifdef USE_KQEMU
2284 /* XXX: should not depend on cpu context */
2285 env = first_cpu;
2286 if (env->kqemu_enabled) {
2287 kqemu_set_phys_mem(start_addr, size, phys_offset);
2288 }
2289#endif
7ba1e619
AL
2290 if (kvm_enabled())
2291 kvm_set_phys_mem(start_addr, size, phys_offset);
2292
67c4d23c
PB
2293 if (phys_offset == IO_MEM_UNASSIGNED) {
2294 region_offset = start_addr;
2295 }
8da3ff18 2296 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2297 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2298 end_addr = start_addr + (target_phys_addr_t)size;
2299 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2300 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2301 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2302 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2303 target_phys_addr_t start_addr2, end_addr2;
2304 int need_subpage = 0;
2305
2306 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2307 need_subpage);
4254fab8 2308 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2309 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2310 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2311 &p->phys_offset, orig_memory,
2312 p->region_offset);
db7b5426
BS
2313 } else {
2314 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2315 >> IO_MEM_SHIFT];
2316 }
8da3ff18
PB
2317 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2318 region_offset);
2319 p->region_offset = 0;
db7b5426
BS
2320 } else {
2321 p->phys_offset = phys_offset;
2322 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2323 (phys_offset & IO_MEM_ROMD))
2324 phys_offset += TARGET_PAGE_SIZE;
2325 }
2326 } else {
2327 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2328 p->phys_offset = phys_offset;
8da3ff18 2329 p->region_offset = region_offset;
db7b5426 2330 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2331 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2332 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2333 } else {
db7b5426
BS
2334 target_phys_addr_t start_addr2, end_addr2;
2335 int need_subpage = 0;
2336
2337 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2338 end_addr2, need_subpage);
2339
4254fab8 2340 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2341 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2342 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2343 addr & TARGET_PAGE_MASK);
db7b5426 2344 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2345 phys_offset, region_offset);
2346 p->region_offset = 0;
db7b5426
BS
2347 }
2348 }
2349 }
8da3ff18 2350 region_offset += TARGET_PAGE_SIZE;
33417e70 2351 }
3b46e624 2352
9d42037b
FB
2353 /* since each CPU stores ram addresses in its TLB cache, we must
2354 reset the modified entries */
2355 /* XXX: slow ! */
2356 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2357 tlb_flush(env, 1);
2358 }
33417e70
FB
2359}
2360
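/* Illustrative sketch (not part of exec.c): wiring a device's MMIO
 * window into the guest physical address space.  cpu_register_io_memory()
 * (defined further down in this file) returns a phys_offset token with
 * the I/O index in its low bits; with region_offset 0 the handlers see
 * the offset from the start of the registered window.  The handler
 * arrays, opaque pointer and base/size constants are hypothetical. */
static void example_map_device_mmio(CPUReadMemoryFunc **dev_read,
                                    CPUWriteMemoryFunc **dev_write,
                                    void *dev_opaque)
{
    int io;

    io = cpu_register_io_memory(0, dev_read, dev_write, dev_opaque);
    cpu_register_physical_memory_offset(0xfe000000 /* base */,
                                        0x1000     /* size */,
                                        io, 0 /* region_offset */);
}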
ba863458 2361/* XXX: temporary until new memory mapping API */
00f82b8a 2362ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2363{
2364 PhysPageDesc *p;
2365
2366 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2367 if (!p)
2368 return IO_MEM_UNASSIGNED;
2369 return p->phys_offset;
2370}
2371
f65ed4c1
AL
2372void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2373{
2374 if (kvm_enabled())
2375 kvm_coalesce_mmio_region(addr, size);
2376}
2377
2378void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2379{
2380 if (kvm_enabled())
2381 kvm_uncoalesce_mmio_region(addr, size);
2382}
2383
e9a1ab19 2384/* XXX: better than nothing */
00f82b8a 2385ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2386{
2387 ram_addr_t addr;
7fb4fdcf 2388 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2389 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2390 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2391 abort();
2392 }
2393 addr = phys_ram_alloc_offset;
2394 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2395 return addr;
2396}
2397
2398void qemu_ram_free(ram_addr_t addr)
2399{
2400}
2401
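/* Illustrative sketch (not part of exec.c): board code usually pairs
 * qemu_ram_alloc() with a registration call.  The returned offset is
 * page aligned and IO_MEM_RAM is zero, so "offset | IO_MEM_RAM" is the
 * customary phys_offset for RAM; cpu_register_physical_memory() is
 * assumed to be the region_offset == 0 wrapper from the headers. */
static void example_map_board_ram(target_phys_addr_t base, ram_addr_t size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(size);

    cpu_register_physical_memory(base, size, ram_offset | IO_MEM_RAM);
}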
a4193c8a 2402static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2403{
67d3b957 2404#ifdef DEBUG_UNASSIGNED
ab3d1727 2405 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2406#endif
0a6f8a6d 2407#if defined(TARGET_SPARC)
e18231a3
BS
2408 do_unassigned_access(addr, 0, 0, 0, 1);
2409#endif
2410 return 0;
2411}
2412
2413static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2414{
2415#ifdef DEBUG_UNASSIGNED
2416 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2417#endif
0a6f8a6d 2418#if defined(TARGET_SPARC)
e18231a3
BS
2419 do_unassigned_access(addr, 0, 0, 0, 2);
2420#endif
2421 return 0;
2422}
2423
2424static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2425{
2426#ifdef DEBUG_UNASSIGNED
2427 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2428#endif
0a6f8a6d 2429#if defined(TARGET_SPARC)
e18231a3 2430 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2431#endif
33417e70
FB
2432 return 0;
2433}
2434
a4193c8a 2435static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2436{
67d3b957 2437#ifdef DEBUG_UNASSIGNED
ab3d1727 2438 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2439#endif
0a6f8a6d 2440#if defined(TARGET_SPARC)
e18231a3
BS
2441 do_unassigned_access(addr, 1, 0, 0, 1);
2442#endif
2443}
2444
2445static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2446{
2447#ifdef DEBUG_UNASSIGNED
2448 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2449#endif
0a6f8a6d 2450#if defined(TARGET_SPARC)
e18231a3
BS
2451 do_unassigned_access(addr, 1, 0, 0, 2);
2452#endif
2453}
2454
2455static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2456{
2457#ifdef DEBUG_UNASSIGNED
2458 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2459#endif
0a6f8a6d 2460#if defined(TARGET_SPARC)
e18231a3 2461 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2462#endif
33417e70
FB
2463}
2464
2465static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2466 unassigned_mem_readb,
e18231a3
BS
2467 unassigned_mem_readw,
2468 unassigned_mem_readl,
33417e70
FB
2469};
2470
2471static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2472 unassigned_mem_writeb,
e18231a3
BS
2473 unassigned_mem_writew,
2474 unassigned_mem_writel,
33417e70
FB
2475};
2476
0f459d16
PB
2477static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2478 uint32_t val)
9fa3e853 2479{
3a7d929e 2480 int dirty_flags;
3a7d929e
FB
2481 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2482 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2483#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2484 tb_invalidate_phys_page_fast(ram_addr, 1);
2485 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2486#endif
3a7d929e 2487 }
0f459d16 2488 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2489#ifdef USE_KQEMU
2490 if (cpu_single_env->kqemu_enabled &&
2491 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2492 kqemu_modify_page(cpu_single_env, ram_addr);
2493#endif
f23db169
FB
2494 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2495 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2496 /* we remove the notdirty callback only if the code has been
2497 flushed */
2498 if (dirty_flags == 0xff)
2e70f6ef 2499 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2500}
2501
0f459d16
PB
2502static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2503 uint32_t val)
9fa3e853 2504{
3a7d929e 2505 int dirty_flags;
3a7d929e
FB
2506 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2507 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2508#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2509 tb_invalidate_phys_page_fast(ram_addr, 2);
2510 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2511#endif
3a7d929e 2512 }
0f459d16 2513 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2514#ifdef USE_KQEMU
2515 if (cpu_single_env->kqemu_enabled &&
2516 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2517 kqemu_modify_page(cpu_single_env, ram_addr);
2518#endif
f23db169
FB
2519 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2520 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2521 /* we remove the notdirty callback only if the code has been
2522 flushed */
2523 if (dirty_flags == 0xff)
2e70f6ef 2524 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2525}
2526
0f459d16
PB
2527static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2528 uint32_t val)
9fa3e853 2529{
3a7d929e 2530 int dirty_flags;
3a7d929e
FB
2531 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2532 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2533#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2534 tb_invalidate_phys_page_fast(ram_addr, 4);
2535 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2536#endif
3a7d929e 2537 }
0f459d16 2538 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2539#ifdef USE_KQEMU
2540 if (cpu_single_env->kqemu_enabled &&
2541 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2542 kqemu_modify_page(cpu_single_env, ram_addr);
2543#endif
f23db169
FB
2544 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2545 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2546 /* we remove the notdirty callback only if the code has been
2547 flushed */
2548 if (dirty_flags == 0xff)
2e70f6ef 2549 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2550}
2551
3a7d929e 2552static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2553 NULL, /* never used */
2554 NULL, /* never used */
2555 NULL, /* never used */
2556};
2557
1ccde1cb
FB
2558static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2559 notdirty_mem_writeb,
2560 notdirty_mem_writew,
2561 notdirty_mem_writel,
2562};
2563
0f459d16 2564/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2565static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2566{
2567 CPUState *env = cpu_single_env;
06d55cc1
AL
2568 target_ulong pc, cs_base;
2569 TranslationBlock *tb;
0f459d16 2570 target_ulong vaddr;
a1d1bb31 2571 CPUWatchpoint *wp;
06d55cc1 2572 int cpu_flags;
0f459d16 2573
06d55cc1
AL
2574 if (env->watchpoint_hit) {
2575 /* We re-entered the check after replacing the TB. Now raise
 2576 * the debug interrupt so that it will trigger after the
2577 * current instruction. */
2578 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2579 return;
2580 }
2e70f6ef 2581 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2582 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2583 if ((vaddr == (wp->vaddr & len_mask) ||
2584 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2585 wp->flags |= BP_WATCHPOINT_HIT;
2586 if (!env->watchpoint_hit) {
2587 env->watchpoint_hit = wp;
2588 tb = tb_find_pc(env->mem_io_pc);
2589 if (!tb) {
2590 cpu_abort(env, "check_watchpoint: could not find TB for "
2591 "pc=%p", (void *)env->mem_io_pc);
2592 }
2593 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2594 tb_phys_invalidate(tb, -1);
2595 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2596 env->exception_index = EXCP_DEBUG;
2597 } else {
2598 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2599 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2600 }
2601 cpu_resume_from_signal(env, NULL);
06d55cc1 2602 }
6e140f28
AL
2603 } else {
2604 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2605 }
2606 }
2607}
2608
6658ffb8
PB
2609/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2610 so these check for a hit then pass through to the normal out-of-line
2611 phys routines. */
2612static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2613{
b4051334 2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2615 return ldub_phys(addr);
2616}
2617
2618static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2619{
b4051334 2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2621 return lduw_phys(addr);
2622}
2623
2624static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2625{
b4051334 2626 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2627 return ldl_phys(addr);
2628}
2629
6658ffb8
PB
2630static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2631 uint32_t val)
2632{
b4051334 2633 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2634 stb_phys(addr, val);
2635}
2636
2637static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2638 uint32_t val)
2639{
b4051334 2640 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2641 stw_phys(addr, val);
2642}
2643
2644static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2645 uint32_t val)
2646{
b4051334 2647 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2648 stl_phys(addr, val);
2649}
2650
2651static CPUReadMemoryFunc *watch_mem_read[3] = {
2652 watch_mem_readb,
2653 watch_mem_readw,
2654 watch_mem_readl,
2655};
2656
2657static CPUWriteMemoryFunc *watch_mem_write[3] = {
2658 watch_mem_writeb,
2659 watch_mem_writew,
2660 watch_mem_writel,
2661};
6658ffb8 2662
db7b5426
BS
2663static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2664 unsigned int len)
2665{
db7b5426
BS
2666 uint32_t ret;
2667 unsigned int idx;
2668
8da3ff18 2669 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2670#if defined(DEBUG_SUBPAGE)
2671 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2672 mmio, len, addr, idx);
2673#endif
8da3ff18
PB
2674 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2675 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2676
2677 return ret;
2678}
2679
2680static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2681 uint32_t value, unsigned int len)
2682{
db7b5426
BS
2683 unsigned int idx;
2684
8da3ff18 2685 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2686#if defined(DEBUG_SUBPAGE)
2687 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2688 mmio, len, addr, idx, value);
2689#endif
8da3ff18
PB
2690 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2691 addr + mmio->region_offset[idx][1][len],
2692 value);
db7b5426
BS
2693}
2694
2695static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2696{
2697#if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699#endif
2700
2701 return subpage_readlen(opaque, addr, 0);
2702}
2703
2704static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2705 uint32_t value)
2706{
2707#if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709#endif
2710 subpage_writelen(opaque, addr, value, 0);
2711}
2712
2713static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2714{
2715#if defined(DEBUG_SUBPAGE)
2716 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2717#endif
2718
2719 return subpage_readlen(opaque, addr, 1);
2720}
2721
2722static void subpage_writew (void *opaque, target_phys_addr_t addr,
2723 uint32_t value)
2724{
2725#if defined(DEBUG_SUBPAGE)
2726 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2727#endif
2728 subpage_writelen(opaque, addr, value, 1);
2729}
2730
2731static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2732{
2733#if defined(DEBUG_SUBPAGE)
2734 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2735#endif
2736
2737 return subpage_readlen(opaque, addr, 2);
2738}
2739
2740static void subpage_writel (void *opaque,
2741 target_phys_addr_t addr, uint32_t value)
2742{
2743#if defined(DEBUG_SUBPAGE)
2744 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2745#endif
2746 subpage_writelen(opaque, addr, value, 2);
2747}
2748
2749static CPUReadMemoryFunc *subpage_read[] = {
2750 &subpage_readb,
2751 &subpage_readw,
2752 &subpage_readl,
2753};
2754
2755static CPUWriteMemoryFunc *subpage_write[] = {
2756 &subpage_writeb,
2757 &subpage_writew,
2758 &subpage_writel,
2759};
2760
2761static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2762 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2763{
2764 int idx, eidx;
4254fab8 2765 unsigned int i;
db7b5426
BS
2766
2767 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2768 return -1;
2769 idx = SUBPAGE_IDX(start);
2770 eidx = SUBPAGE_IDX(end);
2771#if defined(DEBUG_SUBPAGE)
2772 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2773 mmio, start, end, idx, eidx, memory);
2774#endif
2775 memory >>= IO_MEM_SHIFT;
2776 for (; idx <= eidx; idx++) {
4254fab8 2777 for (i = 0; i < 4; i++) {
3ee89922
BS
2778 if (io_mem_read[memory][i]) {
2779 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2780 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2781 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2782 }
2783 if (io_mem_write[memory][i]) {
2784 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2785 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2786 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2787 }
4254fab8 2788 }
db7b5426
BS
2789 }
2790
2791 return 0;
2792}
2793
00f82b8a 2794static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2795 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2796{
2797 subpage_t *mmio;
2798 int subpage_memory;
2799
2800 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2801
2802 mmio->base = base;
2803 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2804#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2805 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2806 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2807#endif
1eec614b
AL
2808 *phys = subpage_memory | IO_MEM_SUBPAGE;
2809 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2810 region_offset);
db7b5426
BS
2811
2812 return mmio;
2813}
2814
88715657
AL
2815static int get_free_io_mem_idx(void)
2816{
2817 int i;
2818
2819 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2820 if (!io_mem_used[i]) {
2821 io_mem_used[i] = 1;
2822 return i;
2823 }
2824
2825 return -1;
2826}
2827
33417e70
FB
2828static void io_mem_init(void)
2829{
88715657
AL
2830 int i;
2831
3a7d929e 2832 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2833 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2834 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2835 for (i=0; i<5; i++)
2836 io_mem_used[i] = 1;
1ccde1cb 2837
0f459d16 2838 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2839 watch_mem_write, NULL);
1ccde1cb 2840 /* alloc dirty bits array */
0a962c02 2841 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2842 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2843}
2844
2845/* mem_read and mem_write are arrays of functions containing the
2846 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2847 2). Functions can be omitted with a NULL function pointer. The
2848 registered functions may be modified dynamically later.
2849 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2850 modified. If it is zero, a new io zone is allocated. The return
2851 value can be used with cpu_register_physical_memory(). (-1) is
 2852 returned on error. */
33417e70
FB
2853int cpu_register_io_memory(int io_index,
2854 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2855 CPUWriteMemoryFunc **mem_write,
2856 void *opaque)
33417e70 2857{
4254fab8 2858 int i, subwidth = 0;
33417e70
FB
2859
2860 if (io_index <= 0) {
88715657
AL
2861 io_index = get_free_io_mem_idx();
2862 if (io_index == -1)
2863 return io_index;
33417e70
FB
2864 } else {
2865 if (io_index >= IO_MEM_NB_ENTRIES)
2866 return -1;
2867 }
b5ff1b31 2868
33417e70 2869 for(i = 0;i < 3; i++) {
4254fab8
BS
2870 if (!mem_read[i] || !mem_write[i])
2871 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2872 io_mem_read[io_index][i] = mem_read[i];
2873 io_mem_write[io_index][i] = mem_write[i];
2874 }
a4193c8a 2875 io_mem_opaque[io_index] = opaque;
4254fab8 2876 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2877}
61382a50 2878
88715657
AL
2879void cpu_unregister_io_memory(int io_table_address)
2880{
2881 int i;
2882 int io_index = io_table_address >> IO_MEM_SHIFT;
2883
2884 for (i=0;i < 3; i++) {
2885 io_mem_read[io_index][i] = unassigned_mem_read[i];
2886 io_mem_write[io_index][i] = unassigned_mem_write[i];
2887 }
2888 io_mem_opaque[io_index] = NULL;
2889 io_mem_used[io_index] = 0;
2890}
2891
8926b517
FB
2892CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2893{
2894 return io_mem_write[io_index >> IO_MEM_SHIFT];
2895}
2896
2897CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2898{
2899 return io_mem_read[io_index >> IO_MEM_SHIFT];
2900}
2901
e2eef170
PB
2902#endif /* !defined(CONFIG_USER_ONLY) */
2903
13eb76e0
FB
2904/* physical memory access (slow version, mainly for debug) */
2905#if defined(CONFIG_USER_ONLY)
5fafdf24 2906void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2907 int len, int is_write)
2908{
2909 int l, flags;
2910 target_ulong page;
53a5960a 2911 void * p;
13eb76e0
FB
2912
2913 while (len > 0) {
2914 page = addr & TARGET_PAGE_MASK;
2915 l = (page + TARGET_PAGE_SIZE) - addr;
2916 if (l > len)
2917 l = len;
2918 flags = page_get_flags(page);
2919 if (!(flags & PAGE_VALID))
2920 return;
2921 if (is_write) {
2922 if (!(flags & PAGE_WRITE))
2923 return;
579a97f7 2924 /* XXX: this code should not depend on lock_user */
72fb7daa 2925 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2926 /* FIXME - should this return an error rather than just fail? */
2927 return;
72fb7daa
AJ
2928 memcpy(p, buf, l);
2929 unlock_user(p, addr, l);
13eb76e0
FB
2930 } else {
2931 if (!(flags & PAGE_READ))
2932 return;
579a97f7 2933 /* XXX: this code should not depend on lock_user */
72fb7daa 2934 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2935 /* FIXME - should this return an error rather than just fail? */
2936 return;
72fb7daa 2937 memcpy(buf, p, l);
5b257578 2938 unlock_user(p, addr, 0);
13eb76e0
FB
2939 }
2940 len -= l;
2941 buf += l;
2942 addr += l;
2943 }
2944}
8df1cd07 2945
13eb76e0 2946#else
5fafdf24 2947void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2948 int len, int is_write)
2949{
2950 int l, io_index;
2951 uint8_t *ptr;
2952 uint32_t val;
2e12669a
FB
2953 target_phys_addr_t page;
2954 unsigned long pd;
92e873b9 2955 PhysPageDesc *p;
3b46e624 2956
13eb76e0
FB
2957 while (len > 0) {
2958 page = addr & TARGET_PAGE_MASK;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2960 if (l > len)
2961 l = len;
92e873b9 2962 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2963 if (!p) {
2964 pd = IO_MEM_UNASSIGNED;
2965 } else {
2966 pd = p->phys_offset;
2967 }
3b46e624 2968
13eb76e0 2969 if (is_write) {
3a7d929e 2970 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2971 target_phys_addr_t addr1 = addr;
13eb76e0 2972 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2973 if (p)
6c2934db 2974 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2975 /* XXX: could force cpu_single_env to NULL to avoid
2976 potential bugs */
6c2934db 2977 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2978 /* 32 bit write access */
c27004ec 2979 val = ldl_p(buf);
6c2934db 2980 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2981 l = 4;
6c2934db 2982 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2983 /* 16 bit write access */
c27004ec 2984 val = lduw_p(buf);
6c2934db 2985 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2986 l = 2;
2987 } else {
1c213d19 2988 /* 8 bit write access */
c27004ec 2989 val = ldub_p(buf);
6c2934db 2990 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2991 l = 1;
2992 }
2993 } else {
b448f2f3
FB
2994 unsigned long addr1;
2995 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2996 /* RAM case */
b448f2f3 2997 ptr = phys_ram_base + addr1;
13eb76e0 2998 memcpy(ptr, buf, l);
3a7d929e
FB
2999 if (!cpu_physical_memory_is_dirty(addr1)) {
3000 /* invalidate code */
3001 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3002 /* set dirty bit */
5fafdf24 3003 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3004 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3005 }
13eb76e0
FB
3006 }
3007 } else {
5fafdf24 3008 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3009 !(pd & IO_MEM_ROMD)) {
6c2934db 3010 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3011 /* I/O case */
3012 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3013 if (p)
6c2934db
AJ
3014 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3015 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3016 /* 32 bit read access */
6c2934db 3017 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3018 stl_p(buf, val);
13eb76e0 3019 l = 4;
6c2934db 3020 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3021 /* 16 bit read access */
6c2934db 3022 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3023 stw_p(buf, val);
13eb76e0
FB
3024 l = 2;
3025 } else {
1c213d19 3026 /* 8 bit read access */
6c2934db 3027 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3028 stb_p(buf, val);
13eb76e0
FB
3029 l = 1;
3030 }
3031 } else {
3032 /* RAM case */
5fafdf24 3033 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3034 (addr & ~TARGET_PAGE_MASK);
3035 memcpy(buf, ptr, l);
3036 }
3037 }
3038 len -= l;
3039 buf += l;
3040 addr += l;
3041 }
3042}
8df1cd07 3043
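/* Illustrative sketch (not part of exec.c): device models usually go
 * through the cpu_physical_memory_read()/cpu_physical_memory_write()
 * wrappers around cpu_physical_memory_rw() above, e.g. to fetch and
 * complete a DMA descriptor.  Note that the bytes are copied verbatim,
 * so multi-byte fields stay in guest endianness.  The descriptor
 * layout and helper name are hypothetical. */
struct example_dma_desc {
    uint32_t buf_addr;
    uint32_t len;
    uint32_t status;
};

static void example_complete_dma(target_phys_addr_t desc_paddr)
{
    struct example_dma_desc desc;

    cpu_physical_memory_read(desc_paddr, (uint8_t *)&desc, sizeof(desc));
    desc.status = 1;                            /* mark descriptor done */
    cpu_physical_memory_write(desc_paddr, (const uint8_t *)&desc,
                              sizeof(desc));
}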
d0ecd2aa 3044/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3045void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3046 const uint8_t *buf, int len)
3047{
3048 int l;
3049 uint8_t *ptr;
3050 target_phys_addr_t page;
3051 unsigned long pd;
3052 PhysPageDesc *p;
3b46e624 3053
d0ecd2aa
FB
3054 while (len > 0) {
3055 page = addr & TARGET_PAGE_MASK;
3056 l = (page + TARGET_PAGE_SIZE) - addr;
3057 if (l > len)
3058 l = len;
3059 p = phys_page_find(page >> TARGET_PAGE_BITS);
3060 if (!p) {
3061 pd = IO_MEM_UNASSIGNED;
3062 } else {
3063 pd = p->phys_offset;
3064 }
3b46e624 3065
d0ecd2aa 3066 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3067 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3068 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3069 /* do nothing */
3070 } else {
3071 unsigned long addr1;
3072 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3073 /* ROM/RAM case */
3074 ptr = phys_ram_base + addr1;
3075 memcpy(ptr, buf, l);
3076 }
3077 len -= l;
3078 buf += l;
3079 addr += l;
3080 }
3081}
3082
6d16c2f8
AL
3083typedef struct {
3084 void *buffer;
3085 target_phys_addr_t addr;
3086 target_phys_addr_t len;
3087} BounceBuffer;
3088
3089static BounceBuffer bounce;
3090
ba223c29
AL
3091typedef struct MapClient {
3092 void *opaque;
3093 void (*callback)(void *opaque);
3094 LIST_ENTRY(MapClient) link;
3095} MapClient;
3096
3097static LIST_HEAD(map_client_list, MapClient) map_client_list
3098 = LIST_HEAD_INITIALIZER(map_client_list);
3099
3100void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3101{
3102 MapClient *client = qemu_malloc(sizeof(*client));
3103
3104 client->opaque = opaque;
3105 client->callback = callback;
3106 LIST_INSERT_HEAD(&map_client_list, client, link);
3107 return client;
3108}
3109
3110void cpu_unregister_map_client(void *_client)
3111{
3112 MapClient *client = (MapClient *)_client;
3113
3114 LIST_REMOVE(client, link);
3115}
3116
3117static void cpu_notify_map_clients(void)
3118{
3119 MapClient *client;
3120
3121 while (!LIST_EMPTY(&map_client_list)) {
3122 client = LIST_FIRST(&map_client_list);
3123 client->callback(client->opaque);
3124 LIST_REMOVE(client, link);
3125 }
3126}
3127
6d16c2f8
AL
3128/* Map a physical memory region into a host virtual address.
3129 * May map a subset of the requested range, given by and returned in *plen.
3130 * May return NULL if resources needed to perform the mapping are exhausted.
3131 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3132 * Use cpu_register_map_client() to know when retrying the map operation is
3133 * likely to succeed.
6d16c2f8
AL
3134 */
3135void *cpu_physical_memory_map(target_phys_addr_t addr,
3136 target_phys_addr_t *plen,
3137 int is_write)
3138{
3139 target_phys_addr_t len = *plen;
3140 target_phys_addr_t done = 0;
3141 int l;
3142 uint8_t *ret = NULL;
3143 uint8_t *ptr;
3144 target_phys_addr_t page;
3145 unsigned long pd;
3146 PhysPageDesc *p;
3147 unsigned long addr1;
3148
3149 while (len > 0) {
3150 page = addr & TARGET_PAGE_MASK;
3151 l = (page + TARGET_PAGE_SIZE) - addr;
3152 if (l > len)
3153 l = len;
3154 p = phys_page_find(page >> TARGET_PAGE_BITS);
3155 if (!p) {
3156 pd = IO_MEM_UNASSIGNED;
3157 } else {
3158 pd = p->phys_offset;
3159 }
3160
3161 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3162 if (done || bounce.buffer) {
3163 break;
3164 }
3165 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3166 bounce.addr = addr;
3167 bounce.len = l;
3168 if (!is_write) {
3169 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3170 }
3171 ptr = bounce.buffer;
3172 } else {
3173 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3174 ptr = phys_ram_base + addr1;
3175 }
3176 if (!done) {
3177 ret = ptr;
3178 } else if (ret + done != ptr) {
3179 break;
3180 }
3181
3182 len -= l;
3183 addr += l;
3184 done += l;
3185 }
3186 *plen = done;
3187 return ret;
3188}
3189
3190/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3191 * Will also mark the memory as dirty if is_write == 1. access_len gives
3192 * the amount of memory that was actually read or written by the caller.
3193 */
3194void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3195 int is_write, target_phys_addr_t access_len)
3196{
3197 if (buffer != bounce.buffer) {
3198 if (is_write) {
3199 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3200 while (access_len) {
3201 unsigned l;
3202 l = TARGET_PAGE_SIZE;
3203 if (l > access_len)
3204 l = access_len;
3205 if (!cpu_physical_memory_is_dirty(addr1)) {
3206 /* invalidate code */
3207 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3208 /* set dirty bit */
3209 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3210 (0xff & ~CODE_DIRTY_FLAG);
3211 }
3212 addr1 += l;
3213 access_len -= l;
3214 }
3215 }
3216 return;
3217 }
3218 if (is_write) {
3219 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3220 }
3221 qemu_free(bounce.buffer);
3222 bounce.buffer = NULL;
ba223c29 3223 cpu_notify_map_clients();
6d16c2f8 3224}
d0ecd2aa 3225
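/* Illustrative sketch (not part of exec.c): zero-copy access to guest
 * memory with the map/unmap pair above.  The map may fall back to the
 * single bounce buffer (or return NULL / a shorter length) when the
 * target range is not plain RAM, so the returned length must be
 * honoured; a real caller would use cpu_register_map_client() to retry
 * once the bounce buffer is free.  fill_payload() is hypothetical. */
static int example_dma_to_guest(target_phys_addr_t addr,
                                target_phys_addr_t len,
                                void (*fill_payload)(void *host,
                                                     target_phys_addr_t len))
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host || plen < len) {
        if (host)
            cpu_physical_memory_unmap(host, plen, 1, 0);
        return -1;                     /* retry later via a map client */
    }
    fill_payload(host, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return 0;
}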
8df1cd07
FB
3226/* warning: addr must be aligned */
3227uint32_t ldl_phys(target_phys_addr_t addr)
3228{
3229 int io_index;
3230 uint8_t *ptr;
3231 uint32_t val;
3232 unsigned long pd;
3233 PhysPageDesc *p;
3234
3235 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3236 if (!p) {
3237 pd = IO_MEM_UNASSIGNED;
3238 } else {
3239 pd = p->phys_offset;
3240 }
3b46e624 3241
5fafdf24 3242 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3243 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3244 /* I/O case */
3245 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3246 if (p)
3247 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3248 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3249 } else {
3250 /* RAM case */
5fafdf24 3251 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3252 (addr & ~TARGET_PAGE_MASK);
3253 val = ldl_p(ptr);
3254 }
3255 return val;
3256}
3257
84b7b8e7
FB
3258/* warning: addr must be aligned */
3259uint64_t ldq_phys(target_phys_addr_t addr)
3260{
3261 int io_index;
3262 uint8_t *ptr;
3263 uint64_t val;
3264 unsigned long pd;
3265 PhysPageDesc *p;
3266
3267 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3268 if (!p) {
3269 pd = IO_MEM_UNASSIGNED;
3270 } else {
3271 pd = p->phys_offset;
3272 }
3b46e624 3273
2a4188a3
FB
3274 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3275 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3276 /* I/O case */
3277 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3278 if (p)
3279 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3280#ifdef TARGET_WORDS_BIGENDIAN
3281 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3282 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3283#else
3284 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3285 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3286#endif
3287 } else {
3288 /* RAM case */
5fafdf24 3289 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3290 (addr & ~TARGET_PAGE_MASK);
3291 val = ldq_p(ptr);
3292 }
3293 return val;
3294}
3295
aab33094
FB
3296/* XXX: optimize */
3297uint32_t ldub_phys(target_phys_addr_t addr)
3298{
3299 uint8_t val;
3300 cpu_physical_memory_read(addr, &val, 1);
3301 return val;
3302}
3303
3304/* XXX: optimize */
3305uint32_t lduw_phys(target_phys_addr_t addr)
3306{
3307 uint16_t val;
3308 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3309 return tswap16(val);
3310}
3311
8df1cd07
FB
 3312/* warning: addr must be aligned. The ram page is not marked as dirty
3313 and the code inside is not invalidated. It is useful if the dirty
3314 bits are used to track modified PTEs */
3315void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3316{
3317 int io_index;
3318 uint8_t *ptr;
3319 unsigned long pd;
3320 PhysPageDesc *p;
3321
3322 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3323 if (!p) {
3324 pd = IO_MEM_UNASSIGNED;
3325 } else {
3326 pd = p->phys_offset;
3327 }
3b46e624 3328
3a7d929e 3329 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3330 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3331 if (p)
3332 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3333 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3334 } else {
74576198
AL
3335 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3336 ptr = phys_ram_base + addr1;
8df1cd07 3337 stl_p(ptr, val);
74576198
AL
3338
3339 if (unlikely(in_migration)) {
3340 if (!cpu_physical_memory_is_dirty(addr1)) {
3341 /* invalidate code */
3342 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3343 /* set dirty bit */
3344 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3345 (0xff & ~CODE_DIRTY_FLAG);
3346 }
3347 }
8df1cd07
FB
3348 }
3349}
3350
bc98a7ef
JM
3351void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3352{
3353 int io_index;
3354 uint8_t *ptr;
3355 unsigned long pd;
3356 PhysPageDesc *p;
3357
3358 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3359 if (!p) {
3360 pd = IO_MEM_UNASSIGNED;
3361 } else {
3362 pd = p->phys_offset;
3363 }
3b46e624 3364
bc98a7ef
JM
3365 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3366 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3367 if (p)
3368 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3369#ifdef TARGET_WORDS_BIGENDIAN
3370 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3371 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3372#else
3373 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3374 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3375#endif
3376 } else {
5fafdf24 3377 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3378 (addr & ~TARGET_PAGE_MASK);
3379 stq_p(ptr, val);
3380 }
3381}
3382
8df1cd07 3383/* warning: addr must be aligned */
8df1cd07
FB
3384void stl_phys(target_phys_addr_t addr, uint32_t val)
3385{
3386 int io_index;
3387 uint8_t *ptr;
3388 unsigned long pd;
3389 PhysPageDesc *p;
3390
3391 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3392 if (!p) {
3393 pd = IO_MEM_UNASSIGNED;
3394 } else {
3395 pd = p->phys_offset;
3396 }
3b46e624 3397
3a7d929e 3398 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3399 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3400 if (p)
3401 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3402 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3403 } else {
3404 unsigned long addr1;
3405 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3406 /* RAM case */
3407 ptr = phys_ram_base + addr1;
3408 stl_p(ptr, val);
3a7d929e
FB
3409 if (!cpu_physical_memory_is_dirty(addr1)) {
3410 /* invalidate code */
3411 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3412 /* set dirty bit */
f23db169
FB
3413 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3414 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3415 }
8df1cd07
FB
3416 }
3417}
3418
aab33094
FB
3419/* XXX: optimize */
3420void stb_phys(target_phys_addr_t addr, uint32_t val)
3421{
3422 uint8_t v = val;
3423 cpu_physical_memory_write(addr, &v, 1);
3424}
3425
3426/* XXX: optimize */
3427void stw_phys(target_phys_addr_t addr, uint32_t val)
3428{
3429 uint16_t v = tswap16(val);
3430 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3431}
3432
3433/* XXX: optimize */
3434void stq_phys(target_phys_addr_t addr, uint64_t val)
3435{
3436 val = tswap64(val);
3437 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3438}
3439
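/* Illustrative sketch (not part of exec.c): the ldl_phys()/stl_phys()
 * family above is convenient for word-sized guest-physical accesses
 * such as page-table walks.  stl_phys_notdirty() skips the dirty/SMC
 * bookkeeping on purpose, which is why target MMU code uses it when
 * setting accessed/dirty bits in a guest PTE.  The 0x20 "accessed"
 * bit below is a hypothetical flag value. */
static void example_mark_pte_accessed(target_phys_addr_t pte_paddr)
{
    uint32_t pte = ldl_phys(pte_paddr);

    if (!(pte & 0x20)) {
        stl_phys_notdirty(pte_paddr, pte | 0x20);
    }
}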
13eb76e0
FB
3440#endif
3441
3442/* virtual memory access for debug */
5fafdf24 3443int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3444 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3445{
3446 int l;
9b3c35e0
JM
3447 target_phys_addr_t phys_addr;
3448 target_ulong page;
13eb76e0
FB
3449
3450 while (len > 0) {
3451 page = addr & TARGET_PAGE_MASK;
3452 phys_addr = cpu_get_phys_page_debug(env, page);
3453 /* if no physical page mapped, return an error */
3454 if (phys_addr == -1)
3455 return -1;
3456 l = (page + TARGET_PAGE_SIZE) - addr;
3457 if (l > len)
3458 l = len;
5fafdf24 3459 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3460 buf, l, is_write);
3461 len -= l;
3462 buf += l;
3463 addr += l;
3464 }
3465 return 0;
3466}
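/* Illustrative sketch (hypothetical helper, not from the original source):
   this is essentially how a gdb-stub style caller reads guest virtual
   memory.  The loop above splits the access at page boundaries and returns
   -1 as soon as a page has no physical mapping. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, sizeof(*out), 0) < 0)
        return -1;      /* unmapped page somewhere in the range */
    return 0;
}
#endif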
3467
3468/* In deterministic execution mode, an instruction that performs device
3469   I/O must be the last instruction in its TB. */
3470void cpu_io_recompile(CPUState *env, void *retaddr)
3471{
3472 TranslationBlock *tb;
3473 uint32_t n, cflags;
3474 target_ulong pc, cs_base;
3475 uint64_t flags;
3476
3477 tb = tb_find_pc((unsigned long)retaddr);
3478 if (!tb) {
3479 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3480 retaddr);
3481 }
3482 n = env->icount_decr.u16.low + tb->icount;
3483 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3484 /* Calculate how many instructions had been executed before the fault
bf20dc07 3485 occurred. */
3486 n = n - env->icount_decr.u16.low;
3487 /* Generate a new TB ending on the I/O insn. */
3488 n++;
3489 /* On MIPS and SH, delay slot instructions can only be restarted if
3490 they were already the first instruction in the TB. If this is not
bf20dc07 3491 the first instruction in a TB then re-execute the preceding
3492 branch. */
3493#if defined(TARGET_MIPS)
3494 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3495 env->active_tc.PC -= 4;
3496 env->icount_decr.u16.low++;
3497 env->hflags &= ~MIPS_HFLAG_BMASK;
3498 }
3499#elif defined(TARGET_SH4)
3500 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3501 && n > 1) {
3502 env->pc -= 2;
3503 env->icount_decr.u16.low++;
3504 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3505 }
3506#endif
3507 /* This should never happen. */
3508 if (n > CF_COUNT_MASK)
3509 cpu_abort(env, "TB too big during recompile");
3510
3511 cflags = n | CF_LAST_IO;
3512 pc = tb->pc;
3513 cs_base = tb->cs_base;
3514 flags = tb->flags;
3515 tb_phys_invalidate(tb, -1);
3516 /* FIXME: In theory this could raise an exception. In practice
3517 we have already translated the block once so it's probably ok. */
3518 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3519 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3520 the first in the TB) then we end up generating a whole new TB and
3521 repeating the fault, which is horribly inefficient.
3522 Better would be to execute just this insn uncached, or generate a
3523 second new TB. */
3524 cpu_resume_from_signal(env, NULL);
3525}
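/* Worked example of the instruction counting above (illustrative numbers,
   and assuming the generated code subtracts the whole tb->icount from
   icount_decr.u16.low on TB entry): with tb->icount = 7 and u16.low = 20
   before the TB, u16.low is 13 when the I/O insn traps; say 3 instructions
   completed before it.  Then n = 13 + 7 = 20, cpu_restore_state() rewinds
   u16.low to 20 - 3 = 17, n becomes 20 - 17 = 3, and n++ gives 4: the new
   TB holds exactly four instructions with the I/O access last (CF_LAST_IO). */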
3526
3527void dump_exec_info(FILE *f,
3528 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3529{
3530 int i, target_code_size, max_target_code_size;
3531 int direct_jmp_count, direct_jmp2_count, cross_page;
3532 TranslationBlock *tb;
3b46e624 3533
3534 target_code_size = 0;
3535 max_target_code_size = 0;
3536 cross_page = 0;
3537 direct_jmp_count = 0;
3538 direct_jmp2_count = 0;
3539 for(i = 0; i < nb_tbs; i++) {
3540 tb = &tbs[i];
3541 target_code_size += tb->size;
3542 if (tb->size > max_target_code_size)
3543 max_target_code_size = tb->size;
3544 if (tb->page_addr[1] != -1)
3545 cross_page++;
3546 if (tb->tb_next_offset[0] != 0xffff) {
3547 direct_jmp_count++;
3548 if (tb->tb_next_offset[1] != 0xffff) {
3549 direct_jmp2_count++;
3550 }
3551 }
3552 }
3553 /* XXX: avoid using doubles ? */
57fec1fe 3554 cpu_fprintf(f, "Translation buffer state:\n");
3555 cpu_fprintf(f, "gen code size %ld/%ld\n",
3556 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3557 cpu_fprintf(f, "TB count %d/%d\n",
3558 nb_tbs, code_gen_max_blocks);
5fafdf24 3559 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3560 nb_tbs ? target_code_size / nb_tbs : 0,
3561 max_target_code_size);
5fafdf24 3562 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3563 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3564 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3565 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3566 cross_page,
3567 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3568 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3569 direct_jmp_count,
3570 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3571 direct_jmp2_count,
3572 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3573 cpu_fprintf(f, "\nStatistics:\n");
3574 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3575 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3576 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3577 tcg_dump_info(f, cpu_fprintf);
3578}
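/* Illustrative usage (not from the original file): the callback-based
   signature lets the caller direct the statistics to any printf-like sink;
   plain stdio works directly. */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif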
3579
5fafdf24 3580#if !defined(CONFIG_USER_ONLY)
3581
3582#define MMUSUFFIX _cmmu
3583#define GETPC() NULL
3584#define env cpu_single_env
b769d8fe 3585#define SOFTMMU_CODE_ACCESS
3586
3587#define SHIFT 0
3588#include "softmmu_template.h"
3589
3590#define SHIFT 1
3591#include "softmmu_template.h"
3592
3593#define SHIFT 2
3594#include "softmmu_template.h"
3595
3596#define SHIFT 3
3597#include "softmmu_template.h"
3598
3599#undef env
3600
3601#endif
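/* Illustrative sketch of the multiple-inclusion idiom used above (all names
   here are made up; the real helpers live in softmmu_template.h): the same
   template header is pulled in once per SHIFT value, and token pasting turns
   it into 1-, 2-, 4- and 8-byte variants of one access routine.  The _cmmu
   instantiation above is the code-fetch flavour used by the translator. */
#if 0
/* example_template.h */
#define DATA_SIZE (1 << SHIFT)
#define EXPAND2(name, size) name##size
#define EXPAND(name, size)  EXPAND2(name, size)

static void EXPAND(example_access, DATA_SIZE)(void)
{
    /* one copy of this function is generated per access size */
}

#undef EXPAND
#undef EXPAND2
#undef DATA_SIZE
#undef SHIFT

/* including file */
#define SHIFT 0
#include "example_template.h"
#define SHIFT 1
#include "example_template.h"
#endif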