[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
54936004 19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
51
52/* make various TB consistency checks */
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
fd6ce8f6 68
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
8da3ff18 149 ram_addr_t region_offset;
150} PhysPageDesc;
151
54936004 152#define L2_BITS 10
153#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154/* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
157 */
158#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159#else
03875444 160#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 161#endif
162
163#define L1_SIZE (1 << L1_BITS)
164#define L2_SIZE (1 << L2_BITS)
165
166unsigned long qemu_real_host_page_size;
167unsigned long qemu_host_page_bits;
168unsigned long qemu_host_page_size;
169unsigned long qemu_host_page_mask;
54936004 170
92e873b9 171/* XXX: for system emulation, it could just be an array */
54936004 172static PageDesc *l1_map[L1_SIZE];
bdaf78e0 173static PhysPageDesc **l1_phys_map;
54936004 174
175#if !defined(CONFIG_USER_ONLY)
176static void io_mem_init(void);
177
33417e70 178/* io memory support */
179CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 181void *io_mem_opaque[IO_MEM_NB_ENTRIES];
88715657 182char io_mem_used[IO_MEM_NB_ENTRIES];
183static int io_mem_watch;
184#endif
33417e70 185
34865134 186/* log support */
d9b630fd 187static const char *logfilename = "/tmp/qemu.log";
188FILE *logfile;
189int loglevel;
e735b91c 190static int log_append = 0;
34865134 191
192/* statistics */
193static int tlb_flush_count;
194static int tb_flush_count;
195static int tb_phys_invalidate_count;
196
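/* Subpage support (below): a subpage_t lets one target page be split among
   several I/O regions, keeping read/write handlers and opaque pointers per
   byte offset within the page and per access size. */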
197#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198typedef struct subpage_t {
199 target_phys_addr_t base;
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
8da3ff18 203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204} subpage_t;
205
206#ifdef _WIN32
207static void map_exec(void *addr, long size)
208{
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
212
213}
214#else
215static void map_exec(void *addr, long size)
216{
4369415f 217 unsigned long start, end, page_size;
7cb69cae 218
4369415f 219 page_size = getpagesize();
7cb69cae 220 start = (unsigned long)addr;
4369415f 221 start &= ~(page_size - 1);
222
223 end = (unsigned long)addr + size;
224 end += page_size - 1;
225 end &= ~(page_size - 1);
226
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
229}
230#endif
231
b346ff46 232static void page_init(void)
54936004 233{
83fb7adf 234 /* NOTE: we can always assume that qemu_host_page_size >=
54936004 235 TARGET_PAGE_SIZE */
236#ifdef _WIN32
237 {
238 SYSTEM_INFO system_info;
239
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
242 }
243#else
244 qemu_real_host_page_size = getpagesize();
245#endif
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
256
257#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258 {
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
262
c8a706fe 263 mmap_lock();
0776590d 264 last_brk = (unsigned long)sbrk(0);
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 274 page_set_flags(startaddr & TARGET_PAGE_MASK,
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
277 }
278 } while (!feof(f));
279 fclose(f);
280 }
c8a706fe 281 mmap_unlock();
282 }
283#endif
284}
285
434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292 return NULL;
293#endif
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a 307#if defined(CONFIG_USER_ONLY)
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 312 *lp = p;
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
318 }
319#else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322#endif
323 }
324 return p + (index & (L2_SIZE - 1));
325}
326
00f82b8a 327static inline PageDesc *page_find(target_ulong index)
54936004 328{
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
54936004 333
434929bf 334 p = *lp;
335 if (!p)
336 return 0;
337 return p + (index & (L2_SIZE - 1));
338}
339
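/* Physical pages are described by a multi-level table: the high bits of the
   page index select an entry in l1_phys_map (with one extra level when the
   physical address space is wider than 32 bits), and the low L2_BITS bits
   select a PhysPageDesc in the leaf array. Levels are allocated lazily when
   'alloc' is nonzero. */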
108c49b8 340static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 341{
108c49b8 342 void **lp, **p;
e3f4e2a4 343 PhysPageDesc *pd;
92e873b9 344
345 p = (void **)l1_phys_map;
346#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347
348#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350#endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
360 }
361#endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363 pd = *lp;
364 if (!pd) {
365 int i;
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
67c4d23c 371 for (i = 0; i < L2_SIZE; i++) {
e3f4e2a4 372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
374 }
92e873b9 375 }
e3f4e2a4 376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
377}
378
108c49b8 379static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 380{
108c49b8 381 return phys_page_find_alloc(index, 0);
382}
383
9fa3e853 384#if !defined(CONFIG_USER_ONLY)
6a00d601 385static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 386static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 387 target_ulong vaddr);
388#define mmap_lock() do { } while(0)
389#define mmap_unlock() do { } while(0)
9fa3e853 390#endif
fd6ce8f6 391
392#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393
394#if defined(CONFIG_USER_ONLY)
395/* Currently it is not recommended to allocate big chunks of data in
 396 user mode. It will change when a dedicated libc is used */
397#define USE_STATIC_CODE_GEN_BUFFER
398#endif
399
400#ifdef USE_STATIC_CODE_GEN_BUFFER
401static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402#endif
403
8fcd3692 404static void code_gen_alloc(unsigned long tb_size)
26a5f13b 405{
406#ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410#else
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
413#if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416#else
26a5f13b 417 /* XXX: needs adjustments */
174a9a1f 418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 419#endif
420 }
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425#if defined(__linux__)
426 {
427 int flags;
428 void *start = NULL;
429
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431#if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
436#elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 442#elif defined(__arm__)
63d41246 443 /* Map the buffer below 32M, so we can use direct calls and branches */
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 448#endif
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
455 }
456 }
457#elif defined(__FreeBSD__)
458 {
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462#if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470#endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
477 }
478 }
479#else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482#endif
4369415f 483#endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489}
490
491/* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494void cpu_exec_init_all(unsigned long tb_size)
495{
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
4369415f 499 page_init();
e2eef170 500#if !defined(CONFIG_USER_ONLY)
26a5f13b 501 io_mem_init();
e2eef170 502#endif
503}
504
505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506
507#define CPU_COMMON_SAVE_VERSION 1
508
509static void cpu_common_save(QEMUFile *f, void *opaque)
510{
511 CPUState *env = opaque;
512
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
515}
516
517static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518{
519 CPUState *env = opaque;
520
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
523
524 qemu_get_be32s(f, &env->halted);
75f482ae 525 qemu_get_be32s(f, &env->interrupt_request);
d9aa1fce 526 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
527 tlb_flush(env, 1);
528
529 return 0;
530}
531#endif
532
6a00d601 533void cpu_exec_init(CPUState *env)
fd6ce8f6 534{
535 CPUState **penv;
536 int cpu_index;
537
538 env->next_cpu = NULL;
539 penv = &first_cpu;
540 cpu_index = 0;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
543 cpu_index++;
544 }
545 env->cpu_index = cpu_index;
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
6a00d601 548 *penv = env;
b3c7724c 549#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
554#endif
555}
556
557static inline void invalidate_page_bitmap(PageDesc *p)
558{
559 if (p->code_bitmap) {
59817ccb 560 qemu_free(p->code_bitmap);
561 p->code_bitmap = NULL;
562 }
563 p->code_write_count = 0;
564}
565
566/* set to NULL all the 'first_tb' fields in all PageDescs */
567static void page_flush_tb(void)
568{
569 int i, j;
570 PageDesc *p;
571
572 for(i = 0; i < L1_SIZE; i++) {
573 p = l1_map[i];
574 if (p) {
575 for(j = 0; j < L2_SIZE; j++) {
576 p->first_tb = NULL;
577 invalidate_page_bitmap(p);
578 p++;
579 }
580 }
581 }
582}
583
584/* flush all the translation blocks */
d4e8164f 585/* XXX: tb_flush is currently not thread safe */
6a00d601 586void tb_flush(CPUState *env1)
fd6ce8f6 587{
6a00d601 588 CPUState *env;
0124311e 589#if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
592 nb_tbs, nb_tbs > 0 ?
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 594#endif
26a5f13b 595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
597
fd6ce8f6 598 nb_tbs = 0;
3b46e624 599
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
602 }
9fa3e853 603
8a8a608f 604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 605 page_flush_tb();
9fa3e853 606
fd6ce8f6 607 code_gen_ptr = code_gen_buffer;
608 /* XXX: flush processor icache at this point if cache flush is
609 expensive */
e3db7226 610 tb_flush_count++;
611}
612
613#ifdef DEBUG_TB_CHECK
614
bc98a7ef 615static void tb_invalidate_check(target_ulong address)
616{
617 TranslationBlock *tb;
618 int i;
619 address &= TARGET_PAGE_MASK;
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 625 address, (long)tb->pc, tb->size);
626 }
627 }
628 }
629}
630
631/* verify that all the pages have correct rights for code */
632static void tb_page_check(void)
633{
634 TranslationBlock *tb;
635 int i, flags1, flags2;
3b46e624 636
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 643 (long)tb->pc, tb->size, flags1, flags2);
644 }
645 }
646 }
647}
648
bdaf78e0 649static void tb_jmp_check(TranslationBlock *tb)
650{
651 TranslationBlock *tb1;
652 unsigned int n1;
653
654 /* suppress any remaining jumps to this TB */
655 tb1 = tb->jmp_first;
656 for(;;) {
657 n1 = (long)tb1 & 3;
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
659 if (n1 == 2)
660 break;
661 tb1 = tb1->jmp_next[n1];
662 }
663 /* check end of list */
664 if (tb1 != tb) {
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
666 }
667}
668
669#endif
670
671/* invalidate one TB */
672static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
673 int next_offset)
674{
675 TranslationBlock *tb1;
676 for(;;) {
677 tb1 = *ptb;
678 if (tb1 == tb) {
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
680 break;
681 }
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
683 }
684}
685
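/* A TB may span two physical pages; page_next[] links it into the list of
   each page it covers, and the low two bits of every pointer in such a list
   encode which of the TB's pages the link refers to. The jmp_first/jmp_next
   lists reuse the same tagging, with the value 2 marking the owning TB at
   the end of the circular list. */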
686static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
687{
688 TranslationBlock *tb1;
689 unsigned int n1;
690
691 for(;;) {
692 tb1 = *ptb;
693 n1 = (long)tb1 & 3;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
695 if (tb1 == tb) {
696 *ptb = tb1->page_next[n1];
697 break;
698 }
699 ptb = &tb1->page_next[n1];
700 }
701}
702
703static inline void tb_jmp_remove(TranslationBlock *tb, int n)
704{
705 TranslationBlock *tb1, **ptb;
706 unsigned int n1;
707
708 ptb = &tb->jmp_next[n];
709 tb1 = *ptb;
710 if (tb1) {
711 /* find tb(n) in circular list */
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
717 break;
718 if (n1 == 2) {
719 ptb = &tb1->jmp_first;
720 } else {
721 ptb = &tb1->jmp_next[n1];
722 }
723 }
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
726
727 tb->jmp_next[n] = NULL;
728 }
729}
730
731/* reset the jump entry 'n' of a TB so that it is not chained to
732 another TB */
733static inline void tb_reset_jump(TranslationBlock *tb, int n)
734{
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
736}
737
2e70f6ef 738void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 739{
6a00d601 740 CPUState *env;
8a40a180 741 PageDesc *p;
d4e8164f 742 unsigned int h, n1;
00f82b8a 743 target_phys_addr_t phys_pc;
8a40a180 744 TranslationBlock *tb1, *tb2;
3b46e624 745
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
5fafdf24 749 tb_remove(&tb_phys_hash[h], tb,
750 offsetof(TranslationBlock, phys_hash_next));
751
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
757 }
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
762 }
763
36bdbe54 764 tb_invalidated_flag = 1;
59817ccb 765
fd6ce8f6 766 /* remove the TB from the hash list */
8a40a180 767 h = tb_jmp_cache_hash_func(tb->pc);
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
771 }
772
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
776
777 /* suppress any remaining jumps to this TB */
778 tb1 = tb->jmp_first;
779 for(;;) {
780 n1 = (long)tb1 & 3;
781 if (n1 == 2)
782 break;
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
787 tb1 = tb2;
788 }
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 790
e3db7226 791 tb_phys_invalidate_count++;
792}
793
794static inline void set_bits(uint8_t *tab, int start, int len)
795{
796 int end, mask, end1;
797
798 end = start + len;
799 tab += start >> 3;
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
802 if (start < end) {
803 mask &= ~(0xff << (end & 7));
804 *tab |= mask;
805 }
806 } else {
807 *tab++ |= mask;
808 start = (start + 8) & ~7;
809 end1 = end & ~7;
810 while (start < end1) {
811 *tab++ = 0xff;
812 start += 8;
813 }
814 if (start < end) {
815 mask = ~(0xff << (end & 7));
816 *tab |= mask;
817 }
818 }
819}
820
821static void build_page_bitmap(PageDesc *p)
822{
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
3b46e624 825
b2a7081a 826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
827
828 tb = p->first_tb;
829 while (tb != NULL) {
830 n = (long)tb & 3;
831 tb = (TranslationBlock *)((long)tb & ~3);
832 /* NOTE: this is subtle as a TB may span two physical pages */
833 if (n == 0) {
834 /* NOTE: tb_end may be after the end of the page, but
835 it is not a problem */
836 tb_start = tb->pc & ~TARGET_PAGE_MASK;
837 tb_end = tb_start + tb->size;
838 if (tb_end > TARGET_PAGE_SIZE)
839 tb_end = TARGET_PAGE_SIZE;
840 } else {
841 tb_start = 0;
842 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
843 }
844 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
845 tb = tb->page_next[n];
846 }
847}
848
849TranslationBlock *tb_gen_code(CPUState *env,
850 target_ulong pc, target_ulong cs_base,
851 int flags, int cflags)
852{
853 TranslationBlock *tb;
854 uint8_t *tc_ptr;
855 target_ulong phys_pc, phys_page2, virt_page2;
856 int code_gen_size;
857
858 phys_pc = get_phys_addr_code(env, pc);
859 tb = tb_alloc(pc);
860 if (!tb) {
861 /* flush must be done */
862 tb_flush(env);
863 /* cannot fail at this point */
c27004ec 864 tb = tb_alloc(pc);
865 /* Don't forget to invalidate previous TB info. */
866 tb_invalidated_flag = 1;
867 }
868 tc_ptr = code_gen_ptr;
869 tb->tc_ptr = tc_ptr;
870 tb->cs_base = cs_base;
871 tb->flags = flags;
872 tb->cflags = cflags;
d07bde88 873 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 874 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 875
d720b93d 876 /* check next page if needed */
c27004ec 877 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 878 phys_page2 = -1;
c27004ec 879 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
880 phys_page2 = get_phys_addr_code(env, virt_page2);
881 }
882 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 883 return tb;
d720b93d 884}
3b46e624 885
886/* invalidate all TBs which intersect with the target physical page
887 starting in range [start;end[. NOTE: start and end must refer to
888 the same physical page. 'is_cpu_write_access' should be true if called
889 from a real cpu write access: the virtual CPU will exit the current
890 TB if code is modified inside this TB. */
00f82b8a 891void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
892 int is_cpu_write_access)
893{
6b917547 894 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 895 CPUState *env = cpu_single_env;
9fa3e853 896 target_ulong tb_start, tb_end;
897 PageDesc *p;
898 int n;
899#ifdef TARGET_HAS_PRECISE_SMC
900 int current_tb_not_found = is_cpu_write_access;
901 TranslationBlock *current_tb = NULL;
902 int current_tb_modified = 0;
903 target_ulong current_pc = 0;
904 target_ulong current_cs_base = 0;
905 int current_flags = 0;
906#endif /* TARGET_HAS_PRECISE_SMC */
907
908 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 909 if (!p)
9fa3e853 910 return;
5fafdf24 911 if (!p->code_bitmap &&
912 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
913 is_cpu_write_access) {
914 /* build code bitmap */
915 build_page_bitmap(p);
916 }
917
918 /* we remove all the TBs in the range [start, end[ */
919 /* XXX: see if in some cases it could be faster to invalidate all the code */
920 tb = p->first_tb;
921 while (tb != NULL) {
922 n = (long)tb & 3;
923 tb = (TranslationBlock *)((long)tb & ~3);
924 tb_next = tb->page_next[n];
925 /* NOTE: this is subtle as a TB may span two physical pages */
926 if (n == 0) {
927 /* NOTE: tb_end may be after the end of the page, but
928 it is not a problem */
929 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
930 tb_end = tb_start + tb->size;
931 } else {
932 tb_start = tb->page_addr[1];
933 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
934 }
935 if (!(tb_end <= start || tb_start >= end)) {
936#ifdef TARGET_HAS_PRECISE_SMC
937 if (current_tb_not_found) {
938 current_tb_not_found = 0;
939 current_tb = NULL;
2e70f6ef 940 if (env->mem_io_pc) {
d720b93d 941 /* now we have a real cpu fault */
2e70f6ef 942 current_tb = tb_find_pc(env->mem_io_pc);
943 }
944 }
945 if (current_tb == tb &&
2e70f6ef 946 (current_tb->cflags & CF_COUNT_MASK) != 1) {
947 /* If we are modifying the current TB, we must stop
948 its execution. We could be more precise by checking
949 that the modification is after the current PC, but it
950 would require a specialized function to partially
951 restore the CPU state */
3b46e624 952
d720b93d 953 current_tb_modified = 1;
5fafdf24 954 cpu_restore_state(current_tb, env,
2e70f6ef 955 env->mem_io_pc, NULL);
956 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
957 &current_flags);
958 }
959#endif /* TARGET_HAS_PRECISE_SMC */
960 /* we need to do that to handle the case where a signal
961 occurs while doing tb_phys_invalidate() */
962 saved_tb = NULL;
963 if (env) {
964 saved_tb = env->current_tb;
965 env->current_tb = NULL;
966 }
9fa3e853 967 tb_phys_invalidate(tb, -1);
968 if (env) {
969 env->current_tb = saved_tb;
970 if (env->interrupt_request && env->current_tb)
971 cpu_interrupt(env, env->interrupt_request);
972 }
973 }
974 tb = tb_next;
975 }
976#if !defined(CONFIG_USER_ONLY)
977 /* if no code remaining, no need to continue to use slow writes */
978 if (!p->first_tb) {
979 invalidate_page_bitmap(p);
d720b93d 980 if (is_cpu_write_access) {
2e70f6ef 981 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
982 }
983 }
984#endif
985#ifdef TARGET_HAS_PRECISE_SMC
986 if (current_tb_modified) {
987 /* we generate a block containing just the instruction
988 modifying the memory. It will ensure that it cannot modify
989 itself */
ea1c1802 990 env->current_tb = NULL;
2e70f6ef 991 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 992 cpu_resume_from_signal(env, NULL);
9fa3e853 993 }
fd6ce8f6 994#endif
9fa3e853 995}
fd6ce8f6 996
9fa3e853 997/* len must be <= 8 and start must be a multiple of len */
00f82b8a 998static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
999{
1000 PageDesc *p;
1001 int offset, b;
59817ccb 1002#if 0
a4193c8a 1003 if (1) {
1004 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1005 cpu_single_env->mem_io_vaddr, len,
1006 cpu_single_env->eip,
1007 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1008 }
1009#endif
9fa3e853 1010 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1011 if (!p)
1012 return;
1013 if (p->code_bitmap) {
1014 offset = start & ~TARGET_PAGE_MASK;
1015 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1016 if (b & ((1 << len) - 1))
1017 goto do_invalidate;
1018 } else {
1019 do_invalidate:
d720b93d 1020 tb_invalidate_phys_page_range(start, start + len, 1);
1021 }
1022}
1023
9fa3e853 1024#if !defined(CONFIG_SOFTMMU)
00f82b8a 1025static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1026 unsigned long pc, void *puc)
9fa3e853 1027{
6b917547 1028 TranslationBlock *tb;
9fa3e853 1029 PageDesc *p;
6b917547 1030 int n;
d720b93d 1031#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1032 TranslationBlock *current_tb = NULL;
d720b93d 1033 CPUState *env = cpu_single_env;
1034 int current_tb_modified = 0;
1035 target_ulong current_pc = 0;
1036 target_ulong current_cs_base = 0;
1037 int current_flags = 0;
d720b93d 1038#endif
1039
1040 addr &= TARGET_PAGE_MASK;
1041 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1042 if (!p)
1043 return;
1044 tb = p->first_tb;
1045#ifdef TARGET_HAS_PRECISE_SMC
1046 if (tb && pc != 0) {
1047 current_tb = tb_find_pc(pc);
1048 }
1049#endif
1050 while (tb != NULL) {
1051 n = (long)tb & 3;
1052 tb = (TranslationBlock *)((long)tb & ~3);
1053#ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb == tb &&
2e70f6ef 1055 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1056 /* If we are modifying the current TB, we must stop
1057 its execution. We could be more precise by checking
1058 that the modification is after the current PC, but it
1059 would require a specialized function to partially
1060 restore the CPU state */
3b46e624 1061
d720b93d
FB
1062 current_tb_modified = 1;
1063 cpu_restore_state(current_tb, env, pc, puc);
1064 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1065 &current_flags);
1066 }
1067#endif /* TARGET_HAS_PRECISE_SMC */
1068 tb_phys_invalidate(tb, addr);
1069 tb = tb->page_next[n];
1070 }
fd6ce8f6 1071 p->first_tb = NULL;
1072#ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb_modified) {
1074 /* we generate a block containing just the instruction
1075 modifying the memory. It will ensure that it cannot modify
1076 itself */
ea1c1802 1077 env->current_tb = NULL;
2e70f6ef 1078 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1079 cpu_resume_from_signal(env, puc);
1080 }
1081#endif
fd6ce8f6 1082}
9fa3e853 1083#endif
1084
1085/* add the tb in the target page and protect it if necessary */
5fafdf24 1086static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1087 unsigned int n, target_ulong page_addr)
1088{
1089 PageDesc *p;
1090 TranslationBlock *last_first_tb;
1091
1092 tb->page_addr[n] = page_addr;
3a7d929e 1093 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1094 tb->page_next[n] = p->first_tb;
1095 last_first_tb = p->first_tb;
1096 p->first_tb = (TranslationBlock *)((long)tb | n);
1097 invalidate_page_bitmap(p);
fd6ce8f6 1098
107db443 1099#if defined(TARGET_HAS_SMC) || 1
d720b93d 1100
9fa3e853 1101#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1102 if (p->flags & PAGE_WRITE) {
1103 target_ulong addr;
1104 PageDesc *p2;
1105 int prot;
1106
1107 /* force the host page as non writable (writes will have a
1108 page fault + mprotect overhead) */
53a5960a 1109 page_addr &= qemu_host_page_mask;
fd6ce8f6 1110 prot = 0;
1111 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1112 addr += TARGET_PAGE_SIZE) {
1113
1114 p2 = page_find (addr >> TARGET_PAGE_BITS);
1115 if (!p2)
1116 continue;
1117 prot |= p2->flags;
1118 p2->flags &= ~PAGE_WRITE;
1119 page_get_flags(addr);
1120 }
5fafdf24 1121 mprotect(g2h(page_addr), qemu_host_page_size,
1122 (prot & PAGE_BITS) & ~PAGE_WRITE);
1123#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1124 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1125 page_addr);
fd6ce8f6 1126#endif
fd6ce8f6 1127 }
1128#else
1129 /* if some code is already present, then the pages are already
1130 protected. So we handle the case where only the first TB is
1131 allocated in a physical page */
1132 if (!last_first_tb) {
6a00d601 1133 tlb_protect_code(page_addr);
1134 }
1135#endif
1136
1137#endif /* TARGET_HAS_SMC */
1138}
1139
1140/* Allocate a new translation block. Flush the translation buffer if
1141 too many translation blocks or too much generated code. */
c27004ec 1142TranslationBlock *tb_alloc(target_ulong pc)
1143{
1144 TranslationBlock *tb;
fd6ce8f6 1145
1146 if (nb_tbs >= code_gen_max_blocks ||
1147 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1148 return NULL;
1149 tb = &tbs[nb_tbs++];
1150 tb->pc = pc;
b448f2f3 1151 tb->cflags = 0;
1152 return tb;
1153}
1154
1155void tb_free(TranslationBlock *tb)
1156{
bf20dc07 1157 /* In practice this is mostly used for single-use temporary TBs.
1158 Ignore the hard cases and just back up if this TB happens to
1159 be the last one generated. */
1160 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1161 code_gen_ptr = tb->tc_ptr;
1162 nb_tbs--;
1163 }
1164}
1165
1166/* add a new TB and link it to the physical page tables. phys_page2 is
1167 (-1) to indicate that only one page contains the TB. */
5fafdf24 1168void tb_link_phys(TranslationBlock *tb,
9fa3e853 1169 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1170{
1171 unsigned int h;
1172 TranslationBlock **ptb;
1173
1174 /* Grab the mmap lock to stop another thread invalidating this TB
1175 before we are done. */
1176 mmap_lock();
1177 /* add in the physical hash table */
1178 h = tb_phys_hash_func(phys_pc);
1179 ptb = &tb_phys_hash[h];
1180 tb->phys_hash_next = *ptb;
1181 *ptb = tb;
1182
1183 /* add in the page list */
1184 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1185 if (phys_page2 != -1)
1186 tb_alloc_page(tb, 1, phys_page2);
1187 else
1188 tb->page_addr[1] = -1;
9fa3e853 1189
1190 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1191 tb->jmp_next[0] = NULL;
1192 tb->jmp_next[1] = NULL;
1193
1194 /* init original jump addresses */
1195 if (tb->tb_next_offset[0] != 0xffff)
1196 tb_reset_jump(tb, 0);
1197 if (tb->tb_next_offset[1] != 0xffff)
1198 tb_reset_jump(tb, 1);
1199
1200#ifdef DEBUG_TB_CHECK
1201 tb_page_check();
1202#endif
c8a706fe 1203 mmap_unlock();
1204}
1205
1206/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1207 tb[1].tc_ptr. Return NULL if not found */
1208TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1209{
1210 int m_min, m_max, m;
1211 unsigned long v;
1212 TranslationBlock *tb;
1213
1214 if (nb_tbs <= 0)
1215 return NULL;
1216 if (tc_ptr < (unsigned long)code_gen_buffer ||
1217 tc_ptr >= (unsigned long)code_gen_ptr)
1218 return NULL;
1219 /* binary search (cf Knuth) */
1220 m_min = 0;
1221 m_max = nb_tbs - 1;
1222 while (m_min <= m_max) {
1223 m = (m_min + m_max) >> 1;
1224 tb = &tbs[m];
1225 v = (unsigned long)tb->tc_ptr;
1226 if (v == tc_ptr)
1227 return tb;
1228 else if (tc_ptr < v) {
1229 m_max = m - 1;
1230 } else {
1231 m_min = m + 1;
1232 }
5fafdf24 1233 }
1234 return &tbs[m_max];
1235}
7501267e 1236
1237static void tb_reset_jump_recursive(TranslationBlock *tb);
1238
1239static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1240{
1241 TranslationBlock *tb1, *tb_next, **ptb;
1242 unsigned int n1;
1243
1244 tb1 = tb->jmp_next[n];
1245 if (tb1 != NULL) {
1246 /* find head of list */
1247 for(;;) {
1248 n1 = (long)tb1 & 3;
1249 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1250 if (n1 == 2)
1251 break;
1252 tb1 = tb1->jmp_next[n1];
1253 }
1254 /* we are now sure that tb jumps to tb1 */
1255 tb_next = tb1;
1256
1257 /* remove tb from the jmp_first list */
1258 ptb = &tb_next->jmp_first;
1259 for(;;) {
1260 tb1 = *ptb;
1261 n1 = (long)tb1 & 3;
1262 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1263 if (n1 == n && tb1 == tb)
1264 break;
1265 ptb = &tb1->jmp_next[n1];
1266 }
1267 *ptb = tb->jmp_next[n];
1268 tb->jmp_next[n] = NULL;
3b46e624 1269
1270 /* suppress the jump to next tb in generated code */
1271 tb_reset_jump(tb, n);
1272
0124311e 1273 /* suppress jumps in the tb on which we could have jumped */
1274 tb_reset_jump_recursive(tb_next);
1275 }
1276}
1277
1278static void tb_reset_jump_recursive(TranslationBlock *tb)
1279{
1280 tb_reset_jump_recursive2(tb, 0);
1281 tb_reset_jump_recursive2(tb, 1);
1282}
1283
1fddef4b 1284#if defined(TARGET_HAS_ICE)
1285static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1286{
1287 target_phys_addr_t addr;
1288 target_ulong pd;
1289 ram_addr_t ram_addr;
1290 PhysPageDesc *p;
d720b93d 1291
1292 addr = cpu_get_phys_page_debug(env, pc);
1293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1294 if (!p) {
1295 pd = IO_MEM_UNASSIGNED;
1296 } else {
1297 pd = p->phys_offset;
1298 }
1299 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1300 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1301}
c27004ec 1302#endif
d720b93d 1303
6658ffb8 1304/* Add a watchpoint. */
1305int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1306 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1307{
b4051334 1308 target_ulong len_mask = ~(len - 1);
c0ce998e 1309 CPUWatchpoint *wp;
6658ffb8 1310
1311 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1312 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1313 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1314 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1315 return -EINVAL;
1316 }
a1d1bb31 1317 wp = qemu_malloc(sizeof(*wp));
1318
1319 wp->vaddr = addr;
b4051334 1320 wp->len_mask = len_mask;
1321 wp->flags = flags;
1322
2dc9f411 1323 /* keep all GDB-injected watchpoints in front */
1324 if (flags & BP_GDB)
1325 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1326 else
1327 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1328
6658ffb8 1329 tlb_flush_page(env, addr);
1330
1331 if (watchpoint)
1332 *watchpoint = wp;
1333 return 0;
1334}
1335
1336/* Remove a specific watchpoint. */
1337int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1338 int flags)
6658ffb8 1339{
b4051334 1340 target_ulong len_mask = ~(len - 1);
a1d1bb31 1341 CPUWatchpoint *wp;
6658ffb8 1342
c0ce998e 1343 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1344 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1345 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1346 cpu_watchpoint_remove_by_ref(env, wp);
1347 return 0;
1348 }
1349 }
a1d1bb31 1350 return -ENOENT;
1351}
1352
1353/* Remove a specific watchpoint by reference. */
1354void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1355{
c0ce998e 1356 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1357
1358 tlb_flush_page(env, watchpoint->vaddr);
1359
1360 qemu_free(watchpoint);
1361}
1362
1363/* Remove all matching watchpoints. */
1364void cpu_watchpoint_remove_all(CPUState *env, int mask)
1365{
c0ce998e 1366 CPUWatchpoint *wp, *next;
a1d1bb31 1367
c0ce998e 1368 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1369 if (wp->flags & mask)
1370 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1371 }
1372}
1373
1374/* Add a breakpoint. */
1375int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1376 CPUBreakpoint **breakpoint)
4c3a88a2 1377{
1fddef4b 1378#if defined(TARGET_HAS_ICE)
c0ce998e 1379 CPUBreakpoint *bp;
3b46e624 1380
a1d1bb31 1381 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1382
1383 bp->pc = pc;
1384 bp->flags = flags;
1385
2dc9f411 1386 /* keep all GDB-injected breakpoints in front */
1387 if (flags & BP_GDB)
1388 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1389 else
1390 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1391
d720b93d 1392 breakpoint_invalidate(env, pc);
1393
1394 if (breakpoint)
1395 *breakpoint = bp;
1396 return 0;
1397#else
a1d1bb31 1398 return -ENOSYS;
1399#endif
1400}
1401
1402/* Remove a specific breakpoint. */
1403int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1404{
7d03f82f 1405#if defined(TARGET_HAS_ICE)
1406 CPUBreakpoint *bp;
1407
c0ce998e 1408 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1409 if (bp->pc == pc && bp->flags == flags) {
1410 cpu_breakpoint_remove_by_ref(env, bp);
1411 return 0;
1412 }
7d03f82f 1413 }
1414 return -ENOENT;
1415#else
1416 return -ENOSYS;
1417#endif
1418}
1419
1420/* Remove a specific breakpoint by reference. */
1421void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1422{
1fddef4b 1423#if defined(TARGET_HAS_ICE)
c0ce998e 1424 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1425
1426 breakpoint_invalidate(env, breakpoint->pc);
1427
1428 qemu_free(breakpoint);
1429#endif
1430}
1431
1432/* Remove all matching breakpoints. */
1433void cpu_breakpoint_remove_all(CPUState *env, int mask)
1434{
1435#if defined(TARGET_HAS_ICE)
c0ce998e 1436 CPUBreakpoint *bp, *next;
a1d1bb31 1437
c0ce998e 1438 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1439 if (bp->flags & mask)
1440 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1441 }
1442#endif
1443}
1444
1445/* enable or disable single step mode. EXCP_DEBUG is returned by the
1446 CPU loop after each instruction */
1447void cpu_single_step(CPUState *env, int enabled)
1448{
1fddef4b 1449#if defined(TARGET_HAS_ICE)
1450 if (env->singlestep_enabled != enabled) {
1451 env->singlestep_enabled = enabled;
1452 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1453 /* XXX: only flush what is necessary */
0124311e 1454 tb_flush(env);
1455 }
1456#endif
1457}
1458
1459/* enable or disable low levels log */
1460void cpu_set_log(int log_flags)
1461{
1462 loglevel = log_flags;
1463 if (loglevel && !logfile) {
11fcfab4 1464 logfile = fopen(logfilename, log_append ? "a" : "w");
1465 if (!logfile) {
1466 perror(logfilename);
1467 _exit(1);
1468 }
1469#if !defined(CONFIG_SOFTMMU)
1470 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1471 {
b55266b5 1472 static char logfile_buf[4096];
1473 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1474 }
1475#else
34865134 1476 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1477#endif
1478 log_append = 1;
1479 }
1480 if (!loglevel && logfile) {
1481 fclose(logfile);
1482 logfile = NULL;
1483 }
1484}
1485
1486void cpu_set_log_filename(const char *filename)
1487{
1488 logfilename = strdup(filename);
1489 if (logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
1492 }
1493 cpu_set_log(loglevel);
34865134 1494}
c33a346e 1495
0124311e 1496/* mask must never be zero, except for A20 change call */
68a79315 1497void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1498{
d5975363 1499#if !defined(USE_NPTL)
ea041c0e 1500 TranslationBlock *tb;
15a51156 1501 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1502#endif
2e70f6ef 1503 int old_mask;
59817ccb 1504
1505 if (mask & CPU_INTERRUPT_EXIT) {
1506 env->exit_request = 1;
1507 mask &= ~CPU_INTERRUPT_EXIT;
1508 }
1509
2e70f6ef 1510 old_mask = env->interrupt_request;
68a79315 1511 env->interrupt_request |= mask;
1512#if defined(USE_NPTL)
1513 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1514 problem and hope the cpu will stop of its own accord. For userspace
1515 emulation this often isn't actually as bad as it sounds. Often
1516 signals are used primarily to interrupt blocking syscalls. */
1517#else
2e70f6ef 1518 if (use_icount) {
266910c4 1519 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1520#ifndef CONFIG_USER_ONLY
2e70f6ef 1521 if (!can_do_io(env)
8a11f5ff 1522 && (mask & ~old_mask) != 0) {
1523 cpu_abort(env, "Raised interrupt while not in I/O function");
1524 }
1525#endif
1526 } else {
1527 tb = env->current_tb;
1528 /* if the cpu is currently executing code, we must unlink it and
1529 all the potentially executing TB */
1530 if (tb && !testandset(&interrupt_lock)) {
1531 env->current_tb = NULL;
1532 tb_reset_jump_recursive(tb);
1533 resetlock(&interrupt_lock);
1534 }
ea041c0e 1535 }
d5975363 1536#endif
1537}
1538
1539void cpu_reset_interrupt(CPUState *env, int mask)
1540{
1541 env->interrupt_request &= ~mask;
1542}
1543
c7cd6a37 1544const CPULogItem cpu_log_items[] = {
5fafdf24 1545 { CPU_LOG_TB_OUT_ASM, "out_asm",
1546 "show generated host assembly code for each compiled TB" },
1547 { CPU_LOG_TB_IN_ASM, "in_asm",
1548 "show target assembly code for each compiled TB" },
5fafdf24 1549 { CPU_LOG_TB_OP, "op",
57fec1fe 1550 "show micro ops for each compiled TB" },
f193c797 1551 { CPU_LOG_TB_OP_OPT, "op_opt",
1552 "show micro ops "
1553#ifdef TARGET_I386
1554 "before eflags optimization and "
f193c797 1555#endif
e01a1157 1556 "after liveness analysis" },
1557 { CPU_LOG_INT, "int",
1558 "show interrupts/exceptions in short format" },
1559 { CPU_LOG_EXEC, "exec",
1560 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1561 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1562 "show CPU state before block translation" },
1563#ifdef TARGET_I386
1564 { CPU_LOG_PCALL, "pcall",
1565 "show protected mode far calls/returns/exceptions" },
1566 { CPU_LOG_RESET, "cpu_reset",
1567 "show CPU state before CPU resets" },
f193c797 1568#endif
8e3a9fd2 1569#ifdef DEBUG_IOPORT
1570 { CPU_LOG_IOPORT, "ioport",
1571 "show all i/o ports accesses" },
8e3a9fd2 1572#endif
1573 { 0, NULL, NULL },
1574};
1575
1576static int cmp1(const char *s1, int n, const char *s2)
1577{
1578 if (strlen(s2) != n)
1579 return 0;
1580 return memcmp(s1, s2, n) == 0;
1581}
3b46e624 1582
1583/* takes a comma separated list of log masks. Return 0 if error. */
1584int cpu_str_to_log_mask(const char *str)
1585{
c7cd6a37 1586 const CPULogItem *item;
1587 int mask;
1588 const char *p, *p1;
1589
1590 p = str;
1591 mask = 0;
1592 for(;;) {
1593 p1 = strchr(p, ',');
1594 if (!p1)
1595 p1 = p + strlen(p);
1596 if(cmp1(p,p1-p,"all")) {
1597 for(item = cpu_log_items; item->mask != 0; item++) {
1598 mask |= item->mask;
1599 }
1600 } else {
1601 for(item = cpu_log_items; item->mask != 0; item++) {
1602 if (cmp1(p, p1 - p, item->name))
1603 goto found;
1604 }
1605 return 0;
8e3a9fd2 1606 }
1607 found:
1608 mask |= item->mask;
1609 if (*p1 != ',')
1610 break;
1611 p = p1 + 1;
1612 }
1613 return mask;
1614}
ea041c0e 1615
1616void cpu_abort(CPUState *env, const char *fmt, ...)
1617{
1618 va_list ap;
493ae1f0 1619 va_list ap2;
1620
1621 va_start(ap, fmt);
493ae1f0 1622 va_copy(ap2, ap);
1623 fprintf(stderr, "qemu: fatal: ");
1624 vfprintf(stderr, fmt, ap);
1625 fprintf(stderr, "\n");
1626#ifdef TARGET_I386
1627 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1628#else
1629 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1630#endif
1631 if (qemu_log_enabled()) {
1632 qemu_log("qemu: fatal: ");
1633 qemu_log_vprintf(fmt, ap2);
1634 qemu_log("\n");
f9373291 1635#ifdef TARGET_I386
93fcfe39 1636 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1637#else
93fcfe39 1638 log_cpu_state(env, 0);
f9373291 1639#endif
31b1a7b4 1640 qemu_log_flush();
93fcfe39 1641 qemu_log_close();
924edcae 1642 }
493ae1f0 1643 va_end(ap2);
f9373291 1644 va_end(ap);
1645 abort();
1646}
1647
1648CPUState *cpu_copy(CPUState *env)
1649{
01ba9816 1650 CPUState *new_env = cpu_init(env->cpu_model_str);
1651 CPUState *next_cpu = new_env->next_cpu;
1652 int cpu_index = new_env->cpu_index;
1653#if defined(TARGET_HAS_ICE)
1654 CPUBreakpoint *bp;
1655 CPUWatchpoint *wp;
1656#endif
1657
c5be9f08 1658 memcpy(new_env, env, sizeof(CPUState));
1659
1660 /* Preserve chaining and index. */
1661 new_env->next_cpu = next_cpu;
1662 new_env->cpu_index = cpu_index;
1663
1664 /* Clone all break/watchpoints.
1665 Note: Once we support ptrace with hw-debug register access, make sure
1666 BP_CPU break/watchpoints are handled correctly on clone. */
1667 TAILQ_INIT(&env->breakpoints);
1668 TAILQ_INIT(&env->watchpoints);
1669#if defined(TARGET_HAS_ICE)
1670 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1671 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1672 }
1673 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1674 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1675 wp->flags, NULL);
1676 }
1677#endif
1678
1679 return new_env;
1680}
1681
1682#if !defined(CONFIG_USER_ONLY)
1683
1684static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1685{
1686 unsigned int i;
1687
1688 /* Discard jump cache entries for any tb which might potentially
1689 overlap the flushed page. */
1690 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1691 memset (&env->tb_jmp_cache[i], 0,
1692 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1693
1694 i = tb_jmp_cache_hash_page(addr);
1695 memset (&env->tb_jmp_cache[i], 0,
1696 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1697}
1698
1699/* NOTE: if flush_global is true, also flush global entries (not
1700 implemented yet) */
1701void tlb_flush(CPUState *env, int flush_global)
33417e70 1702{
33417e70 1703 int i;
0124311e 1704
1705#if defined(DEBUG_TLB)
1706 printf("tlb_flush:\n");
1707#endif
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env->current_tb = NULL;
1711
33417e70 1712 for(i = 0; i < CPU_TLB_SIZE; i++) {
1713 env->tlb_table[0][i].addr_read = -1;
1714 env->tlb_table[0][i].addr_write = -1;
1715 env->tlb_table[0][i].addr_code = -1;
1716 env->tlb_table[1][i].addr_read = -1;
1717 env->tlb_table[1][i].addr_write = -1;
1718 env->tlb_table[1][i].addr_code = -1;
1719#if (NB_MMU_MODES >= 3)
1720 env->tlb_table[2][i].addr_read = -1;
1721 env->tlb_table[2][i].addr_write = -1;
1722 env->tlb_table[2][i].addr_code = -1;
1723#if (NB_MMU_MODES == 4)
1724 env->tlb_table[3][i].addr_read = -1;
1725 env->tlb_table[3][i].addr_write = -1;
1726 env->tlb_table[3][i].addr_code = -1;
1727#endif
1728#endif
33417e70 1729 }
9fa3e853 1730
8a40a180 1731 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1732
1733#ifdef USE_KQEMU
1734 if (env->kqemu_enabled) {
1735 kqemu_flush(env, flush_global);
1736 }
9fa3e853 1737#endif
e3db7226 1738 tlb_flush_count++;
1739}
1740
274da6b2 1741static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1742{
5fafdf24 1743 if (addr == (tlb_entry->addr_read &
84b7b8e7 1744 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1745 addr == (tlb_entry->addr_write &
84b7b8e7 1746 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1747 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1748 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1749 tlb_entry->addr_read = -1;
1750 tlb_entry->addr_write = -1;
1751 tlb_entry->addr_code = -1;
1752 }
61382a50
FB
1753}
1754
2e12669a 1755void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1756{
8a40a180 1757 int i;
0124311e 1758
9fa3e853 1759#if defined(DEBUG_TLB)
108c49b8 1760 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1761#endif
0124311e
FB
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env->current_tb = NULL;
61382a50
FB
1765
1766 addr &= TARGET_PAGE_MASK;
1767 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1768 tlb_flush_entry(&env->tlb_table[0][i], addr);
1769 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1770#if (NB_MMU_MODES >= 3)
1771 tlb_flush_entry(&env->tlb_table[2][i], addr);
1772#if (NB_MMU_MODES == 4)
1773 tlb_flush_entry(&env->tlb_table[3][i], addr);
1774#endif
1775#endif
0124311e 1776
5c751e99 1777 tlb_flush_jmp_cache(env, addr);
9fa3e853 1778
0a962c02
FB
1779#ifdef USE_KQEMU
1780 if (env->kqemu_enabled) {
1781 kqemu_flush_page(env, addr);
1782 }
1783#endif
9fa3e853
FB
1784}
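
/* Illustrative sketch (not part of exec.c): how a target's MMU emulation
   might use the two flush helpers above.  The function name and the
   decision logic are hypothetical; only tlb_flush() and tlb_flush_page()
   are real. */
#if 0
static void example_pte_changed(CPUState *env, target_ulong vaddr,
                                int global_change)
{
    if (global_change) {
        /* e.g. the guest reloaded its page-table base: drop everything */
        tlb_flush(env, 1);
    } else {
        /* a single PTE changed: invalidate just that virtual page */
        tlb_flush_page(env, vaddr);
    }
}
#endif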
1785
9fa3e853
FB
1786/* update the TLBs so that writes to code in the virtual page 'addr'
1787 can be detected */
6a00d601 1788static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1789{
5fafdf24 1790 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1791 ram_addr + TARGET_PAGE_SIZE,
1792 CODE_DIRTY_FLAG);
9fa3e853
FB
1793}
1794
9fa3e853 1795/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1796 tested for self-modifying code */
5fafdf24 1797static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1798 target_ulong vaddr)
9fa3e853 1799{
3a7d929e 1800 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1801}
1802
5fafdf24 1803static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1804 unsigned long start, unsigned long length)
1805{
1806 unsigned long addr;
84b7b8e7
FB
1807 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1808 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1809 if ((addr - start) < length) {
0f459d16 1810 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1811 }
1812 }
1813}
1814
3a7d929e 1815void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1816 int dirty_flags)
1ccde1cb
FB
1817{
1818 CPUState *env;
4f2ac237 1819 unsigned long length, start1;
0a962c02
FB
1820 int i, mask, len;
1821 uint8_t *p;
1ccde1cb
FB
1822
1823 start &= TARGET_PAGE_MASK;
1824 end = TARGET_PAGE_ALIGN(end);
1825
1826 length = end - start;
1827 if (length == 0)
1828 return;
0a962c02 1829 len = length >> TARGET_PAGE_BITS;
3a7d929e 1830#ifdef USE_KQEMU
6a00d601
FB
1831 /* XXX: should not depend on cpu context */
1832 env = first_cpu;
3a7d929e 1833 if (env->kqemu_enabled) {
f23db169
FB
1834 ram_addr_t addr;
1835 addr = start;
1836 for(i = 0; i < len; i++) {
1837 kqemu_set_notdirty(env, addr);
1838 addr += TARGET_PAGE_SIZE;
1839 }
3a7d929e
FB
1840 }
1841#endif
f23db169
FB
1842 mask = ~dirty_flags;
1843 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1844 for(i = 0; i < len; i++)
1845 p[i] &= mask;
1846
1ccde1cb
FB
1847 /* we modify the TLB cache so that the dirty bit will be set again
1848 when accessing the range */
59817ccb 1849 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1850 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1851 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1852 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1853 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1854 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1855#if (NB_MMU_MODES >= 3)
1856 for(i = 0; i < CPU_TLB_SIZE; i++)
1857 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1858#if (NB_MMU_MODES == 4)
1859 for(i = 0; i < CPU_TLB_SIZE; i++)
1860 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1861#endif
1862#endif
6a00d601 1863 }
1ccde1cb
FB
1864}
1865
74576198
AL
1866int cpu_physical_memory_set_dirty_tracking(int enable)
1867{
1868 in_migration = enable;
1869 return 0;
1870}
1871
1872int cpu_physical_memory_get_dirty_tracking(void)
1873{
1874 return in_migration;
1875}
1876
2bec46dc
AL
1877void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1878{
1879 if (kvm_enabled())
1880 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1881}
1882
3a7d929e
FB
1883static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1884{
1885 ram_addr_t ram_addr;
1886
84b7b8e7 1887 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1888 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1889 tlb_entry->addend - (unsigned long)phys_ram_base;
1890 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1891 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1892 }
1893 }
1894}
1895
1896/* update the TLB according to the current state of the dirty bits */
1897void cpu_tlb_update_dirty(CPUState *env)
1898{
1899 int i;
1900 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1901 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1902 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1903 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1904#if (NB_MMU_MODES >= 3)
1905 for(i = 0; i < CPU_TLB_SIZE; i++)
1906 tlb_update_dirty(&env->tlb_table[2][i]);
1907#if (NB_MMU_MODES == 4)
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_update_dirty(&env->tlb_table[3][i]);
1910#endif
1911#endif
3a7d929e
FB
1912}
1913
0f459d16 1914static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1915{
0f459d16
PB
1916 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1917 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1918}
1919
0f459d16
PB
1920/* update the TLB corresponding to virtual page vaddr
1921 so that it is no longer dirty */
1922static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1923{
1ccde1cb
FB
1924 int i;
1925
0f459d16 1926 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1927 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1928 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1929 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1930#if (NB_MMU_MODES >= 3)
0f459d16 1931 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1932#if (NB_MMU_MODES == 4)
0f459d16 1933 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1934#endif
1935#endif
9fa3e853
FB
1936}
1937
59817ccb
FB
1938/* add a new TLB entry. At most one entry for a given virtual address
1939 is permitted. Return 0 if OK or 2 if the page could not be mapped
1940 (can only happen in non SOFTMMU mode for I/O pages or pages
1941 conflicting with the host address space). */
5fafdf24
TS
1942int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1943 target_phys_addr_t paddr, int prot,
6ebbf390 1944 int mmu_idx, int is_softmmu)
9fa3e853 1945{
92e873b9 1946 PhysPageDesc *p;
4f2ac237 1947 unsigned long pd;
9fa3e853 1948 unsigned int index;
4f2ac237 1949 target_ulong address;
0f459d16 1950 target_ulong code_address;
108c49b8 1951 target_phys_addr_t addend;
9fa3e853 1952 int ret;
84b7b8e7 1953 CPUTLBEntry *te;
a1d1bb31 1954 CPUWatchpoint *wp;
0f459d16 1955 target_phys_addr_t iotlb;
9fa3e853 1956
92e873b9 1957 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1958 if (!p) {
1959 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1960 } else {
1961 pd = p->phys_offset;
9fa3e853
FB
1962 }
1963#if defined(DEBUG_TLB)
6ebbf390
JM
1964 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1965 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1966#endif
1967
1968 ret = 0;
0f459d16
PB
1969 address = vaddr;
1970 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1971 /* IO memory case (romd handled later) */
1972 address |= TLB_MMIO;
1973 }
1974 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1975 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1976 /* Normal RAM. */
1977 iotlb = pd & TARGET_PAGE_MASK;
1978 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1979 iotlb |= IO_MEM_NOTDIRTY;
1980 else
1981 iotlb |= IO_MEM_ROM;
1982 } else {
1983 /* IO handlers are currently passed a physical address.
1984 It would be nice to pass an offset from the base address
1985 of that region. This would avoid having to special case RAM,
1986 and avoid full address decoding in every device.
1987 We can't use the high bits of pd for this because
1988 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1989 iotlb = (pd & ~TARGET_PAGE_MASK);
1990 if (p) {
8da3ff18
PB
1991 iotlb += p->region_offset;
1992 } else {
1993 iotlb += paddr;
1994 }
0f459d16
PB
1995 }
1996
1997 code_address = address;
1998 /* Make accesses to pages with watchpoints go via the
1999 watchpoint trap routines. */
c0ce998e 2000 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2001 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2002 iotlb = io_mem_watch + paddr;
2003 /* TODO: The memory case can be optimized by not trapping
2004 reads of pages with a write breakpoint. */
2005 address |= TLB_MMIO;
6658ffb8 2006 }
0f459d16 2007 }
d79acba4 2008
0f459d16
PB
2009 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2010 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2011 te = &env->tlb_table[mmu_idx][index];
2012 te->addend = addend - vaddr;
2013 if (prot & PAGE_READ) {
2014 te->addr_read = address;
2015 } else {
2016 te->addr_read = -1;
2017 }
5c751e99 2018
0f459d16
PB
2019 if (prot & PAGE_EXEC) {
2020 te->addr_code = code_address;
2021 } else {
2022 te->addr_code = -1;
2023 }
2024 if (prot & PAGE_WRITE) {
2025 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2026 (pd & IO_MEM_ROMD)) {
2027 /* Write access calls the I/O callback. */
2028 te->addr_write = address | TLB_MMIO;
2029 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2030 !cpu_physical_memory_is_dirty(pd)) {
2031 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2032 } else {
0f459d16 2033 te->addr_write = address;
9fa3e853 2034 }
0f459d16
PB
2035 } else {
2036 te->addr_write = -1;
9fa3e853 2037 }
9fa3e853
FB
2038 return ret;
2039}
2040
0124311e
FB
2041#else
2042
ee8b7021 2043void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2044{
2045}
2046
2e12669a 2047void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2048{
2049}
2050
5fafdf24
TS
2051int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2052 target_phys_addr_t paddr, int prot,
6ebbf390 2053 int mmu_idx, int is_softmmu)
9fa3e853
FB
2054{
2055 return 0;
2056}
0124311e 2057
9fa3e853
FB
2058/* dump memory mappings */
2059void page_dump(FILE *f)
33417e70 2060{
9fa3e853
FB
2061 unsigned long start, end;
2062 int i, j, prot, prot1;
2063 PageDesc *p;
33417e70 2064
9fa3e853
FB
2065 fprintf(f, "%-8s %-8s %-8s %s\n",
2066 "start", "end", "size", "prot");
2067 start = -1;
2068 end = -1;
2069 prot = 0;
2070 for(i = 0; i <= L1_SIZE; i++) {
2071 if (i < L1_SIZE)
2072 p = l1_map[i];
2073 else
2074 p = NULL;
2075 for(j = 0;j < L2_SIZE; j++) {
2076 if (!p)
2077 prot1 = 0;
2078 else
2079 prot1 = p[j].flags;
2080 if (prot1 != prot) {
2081 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2082 if (start != -1) {
2083 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2084 start, end, end - start,
9fa3e853
FB
2085 prot & PAGE_READ ? 'r' : '-',
2086 prot & PAGE_WRITE ? 'w' : '-',
2087 prot & PAGE_EXEC ? 'x' : '-');
2088 }
2089 if (prot1 != 0)
2090 start = end;
2091 else
2092 start = -1;
2093 prot = prot1;
2094 }
2095 if (!p)
2096 break;
2097 }
33417e70 2098 }
33417e70
FB
2099}
2100
53a5960a 2101int page_get_flags(target_ulong address)
33417e70 2102{
9fa3e853
FB
2103 PageDesc *p;
2104
2105 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2106 if (!p)
9fa3e853
FB
2107 return 0;
2108 return p->flags;
2109}
2110
2111/* modify the flags of a page and invalidate the code if
2112 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2113 depending on PAGE_WRITE */
53a5960a 2114void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2115{
2116 PageDesc *p;
53a5960a 2117 target_ulong addr;
9fa3e853 2118
c8a706fe 2119 /* mmap_lock should already be held. */
9fa3e853
FB
2120 start = start & TARGET_PAGE_MASK;
2121 end = TARGET_PAGE_ALIGN(end);
2122 if (flags & PAGE_WRITE)
2123 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2124 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2125 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2126 /* We may be called for host regions that are outside guest
2127 address space. */
2128 if (!p)
2129 return;
9fa3e853
FB
2130 /* if the write protection is set, then we invalidate the code
2131 inside */
5fafdf24 2132 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2133 (flags & PAGE_WRITE) &&
2134 p->first_tb) {
d720b93d 2135 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2136 }
2137 p->flags = flags;
2138 }
33417e70
FB
2139}
2140
3d97b40b
TS
2141int page_check_range(target_ulong start, target_ulong len, int flags)
2142{
2143 PageDesc *p;
2144 target_ulong end;
2145 target_ulong addr;
2146
55f280c9
AZ
2147 if (start + len < start)
2148 /* we've wrapped around */
2149 return -1;
2150
3d97b40b
TS
2151 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2152 start = start & TARGET_PAGE_MASK;
2153
3d97b40b
TS
2154 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2155 p = page_find(addr >> TARGET_PAGE_BITS);
2156 if( !p )
2157 return -1;
2158 if( !(p->flags & PAGE_VALID) )
2159 return -1;
2160
dae3270c 2161 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2162 return -1;
dae3270c
FB
2163 if (flags & PAGE_WRITE) {
2164 if (!(p->flags & PAGE_WRITE_ORG))
2165 return -1;
2166 /* unprotect the page if it was put read-only because it
2167 contains translated code */
2168 if (!(p->flags & PAGE_WRITE)) {
2169 if (!page_unprotect(addr, 0, NULL))
2170 return -1;
2171 }
2172 return 0;
2173 }
3d97b40b
TS
2174 }
2175 return 0;
2176}
2177
9fa3e853
FB
2178/* called from signal handler: invalidate the code and unprotect the
2179 page. Return TRUE if the fault was successfully handled. */
53a5960a 2180int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2181{
2182 unsigned int page_index, prot, pindex;
2183 PageDesc *p, *p1;
53a5960a 2184 target_ulong host_start, host_end, addr;
9fa3e853 2185
c8a706fe
PB
2186 /* Technically this isn't safe inside a signal handler. However we
2187 know this only ever happens in a synchronous SEGV handler, so in
2188 practice it seems to be ok. */
2189 mmap_lock();
2190
83fb7adf 2191 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2192 page_index = host_start >> TARGET_PAGE_BITS;
2193 p1 = page_find(page_index);
c8a706fe
PB
2194 if (!p1) {
2195 mmap_unlock();
9fa3e853 2196 return 0;
c8a706fe 2197 }
83fb7adf 2198 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2199 p = p1;
2200 prot = 0;
2201 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2202 prot |= p->flags;
2203 p++;
2204 }
2205 /* if the page was really writable, then we change its
2206 protection back to writable */
2207 if (prot & PAGE_WRITE_ORG) {
2208 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2209 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2210 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2211 (prot & PAGE_BITS) | PAGE_WRITE);
2212 p1[pindex].flags |= PAGE_WRITE;
2213 /* and since the content will be modified, we must invalidate
2214 the corresponding translated code. */
d720b93d 2215 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2216#ifdef DEBUG_TB_CHECK
2217 tb_invalidate_check(address);
2218#endif
c8a706fe 2219 mmap_unlock();
9fa3e853
FB
2220 return 1;
2221 }
2222 }
c8a706fe 2223 mmap_unlock();
9fa3e853
FB
2224 return 0;
2225}
2226
6a00d601
FB
2227static inline void tlb_set_dirty(CPUState *env,
2228 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2229{
2230}
9fa3e853
FB
2231#endif /* defined(CONFIG_USER_ONLY) */
2232
e2eef170 2233#if !defined(CONFIG_USER_ONLY)
8da3ff18 2234
db7b5426 2235static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2236 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2237static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2238 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2239#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2240 need_subpage) \
2241 do { \
2242 if (addr > start_addr) \
2243 start_addr2 = 0; \
2244 else { \
2245 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2246 if (start_addr2 > 0) \
2247 need_subpage = 1; \
2248 } \
2249 \
49e9fba2 2250 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2251 end_addr2 = TARGET_PAGE_SIZE - 1; \
2252 else { \
2253 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2254 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2255 need_subpage = 1; \
2256 } \
2257 } while (0)
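
/* Worked example for CHECK_SUBPAGE (illustrative, assuming 4 KiB target
   pages, i.e. TARGET_PAGE_SIZE == 0x1000): registering start_addr = 0x10100
   with orig_size = 0x200 visits the single page addr = 0x10000.  Since
   addr < start_addr, start_addr2 becomes 0x100 (> 0, so need_subpage = 1).
   (start_addr + orig_size) - addr is 0x300 < TARGET_PAGE_SIZE, so end_addr2
   is (0x10300 - 1) & ~TARGET_PAGE_MASK = 0x2ff (< 0xfff, need_subpage stays
   1).  The region only covers offsets 0x100..0x2ff of that page and must
   therefore go through a subpage. */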
2258
33417e70
FB
2259/* register physical memory. 'size' must be a multiple of the target
2260 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2261 io memory page. The address used when calling the IO function is
2262 the offset from the start of the region, plus region_offset. Both
2263 start_addr and region_offset are rounded down to a page boundary
2264 before calculating this offset. This should not be a problem unless
2265 the low bits of start_addr and region_offset differ. */
2266void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2267 ram_addr_t size,
2268 ram_addr_t phys_offset,
2269 ram_addr_t region_offset)
33417e70 2270{
108c49b8 2271 target_phys_addr_t addr, end_addr;
92e873b9 2272 PhysPageDesc *p;
9d42037b 2273 CPUState *env;
00f82b8a 2274 ram_addr_t orig_size = size;
db7b5426 2275 void *subpage;
33417e70 2276
da260249
FB
2277#ifdef USE_KQEMU
2278 /* XXX: should not depend on cpu context */
2279 env = first_cpu;
2280 if (env->kqemu_enabled) {
2281 kqemu_set_phys_mem(start_addr, size, phys_offset);
2282 }
2283#endif
7ba1e619
AL
2284 if (kvm_enabled())
2285 kvm_set_phys_mem(start_addr, size, phys_offset);
2286
67c4d23c
PB
2287 if (phys_offset == IO_MEM_UNASSIGNED) {
2288 region_offset = start_addr;
2289 }
8da3ff18 2290 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2291 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2292 end_addr = start_addr + (target_phys_addr_t)size;
2293 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2294 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2295 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2296 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2297 target_phys_addr_t start_addr2, end_addr2;
2298 int need_subpage = 0;
2299
2300 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2301 need_subpage);
4254fab8 2302 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2303 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2304 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2305 &p->phys_offset, orig_memory,
2306 p->region_offset);
db7b5426
BS
2307 } else {
2308 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2309 >> IO_MEM_SHIFT];
2310 }
8da3ff18
PB
2311 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2312 region_offset);
2313 p->region_offset = 0;
db7b5426
BS
2314 } else {
2315 p->phys_offset = phys_offset;
2316 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2317 (phys_offset & IO_MEM_ROMD))
2318 phys_offset += TARGET_PAGE_SIZE;
2319 }
2320 } else {
2321 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2322 p->phys_offset = phys_offset;
8da3ff18 2323 p->region_offset = region_offset;
db7b5426 2324 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2325 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2326 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2327 } else {
db7b5426
BS
2328 target_phys_addr_t start_addr2, end_addr2;
2329 int need_subpage = 0;
2330
2331 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2332 end_addr2, need_subpage);
2333
4254fab8 2334 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2335 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2336 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2337 addr & TARGET_PAGE_MASK);
db7b5426 2338 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2339 phys_offset, region_offset);
2340 p->region_offset = 0;
db7b5426
BS
2341 }
2342 }
2343 }
8da3ff18 2344 region_offset += TARGET_PAGE_SIZE;
33417e70 2345 }
3b46e624 2346
9d42037b
FB
2347 /* since each CPU stores ram addresses in its TLB cache, we must
2348 reset the modified entries */
2349 /* XXX: slow ! */
2350 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2351 tlb_flush(env, 1);
2352 }
33417e70
FB
2353}
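
/* Illustrative sketch (not part of exec.c): how board code typically pairs
   qemu_ram_alloc() with the registration call above.  The guest-physical
   addresses, the RAM size and the mmio_index value are hypothetical. */
#if 0
static void example_register_board_memory(void)
{
    ram_addr_t ram_offset;
    int mmio_index = 0; /* hypothetical value from cpu_register_io_memory() */

    /* 16 MB of ordinary RAM at guest-physical address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory_offset(0x00000000, 16 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);

    /* one page of device MMIO at a hypothetical base address */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        mmio_index, 0);
}
#endif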
2354
ba863458 2355/* XXX: temporary until new memory mapping API */
00f82b8a 2356ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2357{
2358 PhysPageDesc *p;
2359
2360 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2361 if (!p)
2362 return IO_MEM_UNASSIGNED;
2363 return p->phys_offset;
2364}
2365
f65ed4c1
AL
2366void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2367{
2368 if (kvm_enabled())
2369 kvm_coalesce_mmio_region(addr, size);
2370}
2371
2372void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2373{
2374 if (kvm_enabled())
2375 kvm_uncoalesce_mmio_region(addr, size);
2376}
2377
e9a1ab19 2378/* XXX: better than nothing */
00f82b8a 2379ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2380{
2381 ram_addr_t addr;
7fb4fdcf 2382 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2383 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2384 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2385 abort();
2386 }
2387 addr = phys_ram_alloc_offset;
2388 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
57ba0792
JK
2389
2390 if (kvm_enabled())
2391 kvm_setup_guest_memory(phys_ram_base + addr, size);
2392
e9a1ab19
FB
2393 return addr;
2394}
2395
2396void qemu_ram_free(ram_addr_t addr)
2397{
2398}
2399
a4193c8a 2400static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2401{
67d3b957 2402#ifdef DEBUG_UNASSIGNED
ab3d1727 2403 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2404#endif
0a6f8a6d 2405#if defined(TARGET_SPARC)
e18231a3
BS
2406 do_unassigned_access(addr, 0, 0, 0, 1);
2407#endif
2408 return 0;
2409}
2410
2411static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2412{
2413#ifdef DEBUG_UNASSIGNED
2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415#endif
0a6f8a6d 2416#if defined(TARGET_SPARC)
e18231a3
BS
2417 do_unassigned_access(addr, 0, 0, 0, 2);
2418#endif
2419 return 0;
2420}
2421
2422static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2423{
2424#ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2426#endif
0a6f8a6d 2427#if defined(TARGET_SPARC)
e18231a3 2428 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2429#endif
33417e70
FB
2430 return 0;
2431}
2432
a4193c8a 2433static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2434{
67d3b957 2435#ifdef DEBUG_UNASSIGNED
ab3d1727 2436 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2437#endif
0a6f8a6d 2438#if defined(TARGET_SPARC)
e18231a3
BS
2439 do_unassigned_access(addr, 1, 0, 0, 1);
2440#endif
2441}
2442
2443static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2444{
2445#ifdef DEBUG_UNASSIGNED
2446 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2447#endif
0a6f8a6d 2448#if defined(TARGET_SPARC)
e18231a3
BS
2449 do_unassigned_access(addr, 1, 0, 0, 2);
2450#endif
2451}
2452
2453static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2454{
2455#ifdef DEBUG_UNASSIGNED
2456 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2457#endif
0a6f8a6d 2458#if defined(TARGET_SPARC)
e18231a3 2459 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2460#endif
33417e70
FB
2461}
2462
2463static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2464 unassigned_mem_readb,
e18231a3
BS
2465 unassigned_mem_readw,
2466 unassigned_mem_readl,
33417e70
FB
2467};
2468
2469static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2470 unassigned_mem_writeb,
e18231a3
BS
2471 unassigned_mem_writew,
2472 unassigned_mem_writel,
33417e70
FB
2473};
2474
0f459d16
PB
2475static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2476 uint32_t val)
9fa3e853 2477{
3a7d929e 2478 int dirty_flags;
3a7d929e
FB
2479 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2480 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2481#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2482 tb_invalidate_phys_page_fast(ram_addr, 1);
2483 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2484#endif
3a7d929e 2485 }
0f459d16 2486 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2487#ifdef USE_KQEMU
2488 if (cpu_single_env->kqemu_enabled &&
2489 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2490 kqemu_modify_page(cpu_single_env, ram_addr);
2491#endif
f23db169
FB
2492 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2493 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2494 /* we remove the notdirty callback only if the code has been
2495 flushed */
2496 if (dirty_flags == 0xff)
2e70f6ef 2497 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2498}
2499
0f459d16
PB
2500static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2501 uint32_t val)
9fa3e853 2502{
3a7d929e 2503 int dirty_flags;
3a7d929e
FB
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2505 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2506#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2507 tb_invalidate_phys_page_fast(ram_addr, 2);
2508 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2509#endif
3a7d929e 2510 }
0f459d16 2511 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2512#ifdef USE_KQEMU
2513 if (cpu_single_env->kqemu_enabled &&
2514 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2515 kqemu_modify_page(cpu_single_env, ram_addr);
2516#endif
f23db169
FB
2517 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2518 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2519 /* we remove the notdirty callback only if the code has been
2520 flushed */
2521 if (dirty_flags == 0xff)
2e70f6ef 2522 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2523}
2524
0f459d16
PB
2525static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2526 uint32_t val)
9fa3e853 2527{
3a7d929e 2528 int dirty_flags;
3a7d929e
FB
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2530 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2531#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2532 tb_invalidate_phys_page_fast(ram_addr, 4);
2533 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2534#endif
3a7d929e 2535 }
0f459d16 2536 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2537#ifdef USE_KQEMU
2538 if (cpu_single_env->kqemu_enabled &&
2539 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2540 kqemu_modify_page(cpu_single_env, ram_addr);
2541#endif
f23db169
FB
2542 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2543 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2544 /* we remove the notdirty callback only if the code has been
2545 flushed */
2546 if (dirty_flags == 0xff)
2e70f6ef 2547 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2548}
2549
3a7d929e 2550static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2551 NULL, /* never used */
2552 NULL, /* never used */
2553 NULL, /* never used */
2554};
2555
1ccde1cb
FB
2556static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2557 notdirty_mem_writeb,
2558 notdirty_mem_writew,
2559 notdirty_mem_writel,
2560};
2561
0f459d16 2562/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2563static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2564{
2565 CPUState *env = cpu_single_env;
06d55cc1
AL
2566 target_ulong pc, cs_base;
2567 TranslationBlock *tb;
0f459d16 2568 target_ulong vaddr;
a1d1bb31 2569 CPUWatchpoint *wp;
06d55cc1 2570 int cpu_flags;
0f459d16 2571
06d55cc1
AL
2572 if (env->watchpoint_hit) {
2573 /* We re-entered the check after replacing the TB. Now raise
2574 * the debug interrupt so that it will trigger after the
2575 * current instruction. */
2576 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2577 return;
2578 }
2e70f6ef 2579 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2580 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2581 if ((vaddr == (wp->vaddr & len_mask) ||
2582 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2583 wp->flags |= BP_WATCHPOINT_HIT;
2584 if (!env->watchpoint_hit) {
2585 env->watchpoint_hit = wp;
2586 tb = tb_find_pc(env->mem_io_pc);
2587 if (!tb) {
2588 cpu_abort(env, "check_watchpoint: could not find TB for "
2589 "pc=%p", (void *)env->mem_io_pc);
2590 }
2591 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2592 tb_phys_invalidate(tb, -1);
2593 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2594 env->exception_index = EXCP_DEBUG;
2595 } else {
2596 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2597 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2598 }
2599 cpu_resume_from_signal(env, NULL);
06d55cc1 2600 }
6e140f28
AL
2601 } else {
2602 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2603 }
2604 }
2605}
2606
6658ffb8
PB
2607/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2608 so these check for a hit then pass through to the normal out-of-line
2609 phys routines. */
2610static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2611{
b4051334 2612 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2613 return ldub_phys(addr);
2614}
2615
2616static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2617{
b4051334 2618 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2619 return lduw_phys(addr);
2620}
2621
2622static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2623{
b4051334 2624 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2625 return ldl_phys(addr);
2626}
2627
6658ffb8
PB
2628static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2629 uint32_t val)
2630{
b4051334 2631 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2632 stb_phys(addr, val);
2633}
2634
2635static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2636 uint32_t val)
2637{
b4051334 2638 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2639 stw_phys(addr, val);
2640}
2641
2642static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2643 uint32_t val)
2644{
b4051334 2645 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2646 stl_phys(addr, val);
2647}
2648
2649static CPUReadMemoryFunc *watch_mem_read[3] = {
2650 watch_mem_readb,
2651 watch_mem_readw,
2652 watch_mem_readl,
2653};
2654
2655static CPUWriteMemoryFunc *watch_mem_write[3] = {
2656 watch_mem_writeb,
2657 watch_mem_writew,
2658 watch_mem_writel,
2659};
6658ffb8 2660
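
/* Illustrative sketch (not part of exec.c): the watch_mem_* handlers above
   are normally reached by inserting a watchpoint; the address, length and
   flags below are hypothetical. */
#if 0
static void example_watch_guest_word(CPUState *env)
{
    /* trap guest writes to a 4-byte word at 0x1000; check_watchpoint()
       then raises EXCP_DEBUG via the watch_mem_* handlers */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, NULL);
}
#endif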
db7b5426
BS
2661static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2662 unsigned int len)
2663{
db7b5426
BS
2664 uint32_t ret;
2665 unsigned int idx;
2666
8da3ff18 2667 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2668#if defined(DEBUG_SUBPAGE)
2669 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2670 mmio, len, addr, idx);
2671#endif
8da3ff18
PB
2672 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2673 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2674
2675 return ret;
2676}
2677
2678static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2679 uint32_t value, unsigned int len)
2680{
db7b5426
BS
2681 unsigned int idx;
2682
8da3ff18 2683 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2684#if defined(DEBUG_SUBPAGE)
2685 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2686 mmio, len, addr, idx, value);
2687#endif
8da3ff18
PB
2688 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2689 addr + mmio->region_offset[idx][1][len],
2690 value);
db7b5426
BS
2691}
2692
2693static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2694{
2695#if defined(DEBUG_SUBPAGE)
2696 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2697#endif
2698
2699 return subpage_readlen(opaque, addr, 0);
2700}
2701
2702static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2703 uint32_t value)
2704{
2705#if defined(DEBUG_SUBPAGE)
2706 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2707#endif
2708 subpage_writelen(opaque, addr, value, 0);
2709}
2710
2711static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2712{
2713#if defined(DEBUG_SUBPAGE)
2714 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2715#endif
2716
2717 return subpage_readlen(opaque, addr, 1);
2718}
2719
2720static void subpage_writew (void *opaque, target_phys_addr_t addr,
2721 uint32_t value)
2722{
2723#if defined(DEBUG_SUBPAGE)
2724 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2725#endif
2726 subpage_writelen(opaque, addr, value, 1);
2727}
2728
2729static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2730{
2731#if defined(DEBUG_SUBPAGE)
2732 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2733#endif
2734
2735 return subpage_readlen(opaque, addr, 2);
2736}
2737
2738static void subpage_writel (void *opaque,
2739 target_phys_addr_t addr, uint32_t value)
2740{
2741#if defined(DEBUG_SUBPAGE)
2742 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2743#endif
2744 subpage_writelen(opaque, addr, value, 2);
2745}
2746
2747static CPUReadMemoryFunc *subpage_read[] = {
2748 &subpage_readb,
2749 &subpage_readw,
2750 &subpage_readl,
2751};
2752
2753static CPUWriteMemoryFunc *subpage_write[] = {
2754 &subpage_writeb,
2755 &subpage_writew,
2756 &subpage_writel,
2757};
2758
2759static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2760 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2761{
2762 int idx, eidx;
4254fab8 2763 unsigned int i;
db7b5426
BS
2764
2765 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2766 return -1;
2767 idx = SUBPAGE_IDX(start);
2768 eidx = SUBPAGE_IDX(end);
2769#if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2771 mmio, start, end, idx, eidx, memory);
2772#endif
2773 memory >>= IO_MEM_SHIFT;
2774 for (; idx <= eidx; idx++) {
4254fab8 2775 for (i = 0; i < 4; i++) {
3ee89922
BS
2776 if (io_mem_read[memory][i]) {
2777 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2778 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2779 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2780 }
2781 if (io_mem_write[memory][i]) {
2782 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2783 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2784 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2785 }
4254fab8 2786 }
db7b5426
BS
2787 }
2788
2789 return 0;
2790}
2791
00f82b8a 2792static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2793 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2794{
2795 subpage_t *mmio;
2796 int subpage_memory;
2797
2798 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2799
2800 mmio->base = base;
2801 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2802#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2803 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2804 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2805#endif
1eec614b
AL
2806 *phys = subpage_memory | IO_MEM_SUBPAGE;
2807 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2808 region_offset);
db7b5426
BS
2809
2810 return mmio;
2811}
2812
88715657
AL
2813static int get_free_io_mem_idx(void)
2814{
2815 int i;
2816
2817 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2818 if (!io_mem_used[i]) {
2819 io_mem_used[i] = 1;
2820 return i;
2821 }
2822
2823 return -1;
2824}
2825
33417e70
FB
2826static void io_mem_init(void)
2827{
88715657
AL
2828 int i;
2829
3a7d929e 2830 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2831 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2832 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2833 for (i=0; i<5; i++)
2834 io_mem_used[i] = 1;
1ccde1cb 2835
0f459d16 2836 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2837 watch_mem_write, NULL);
1ccde1cb 2838 /* alloc dirty bits array */
0a962c02 2839 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2840 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2841}
2842
2843/* mem_read and mem_write are arrays of functions containing the
2844 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2845 2). Functions can be omitted with a NULL function pointer. The
2846 registered functions may be modified dynamically later.
2847 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2848 modified. If it is zero, a new io zone is allocated. The return
2849 value can be used with cpu_register_physical_memory(). (-1) is
2850 returned on error. */
33417e70
FB
2851int cpu_register_io_memory(int io_index,
2852 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2853 CPUWriteMemoryFunc **mem_write,
2854 void *opaque)
33417e70 2855{
4254fab8 2856 int i, subwidth = 0;
33417e70
FB
2857
2858 if (io_index <= 0) {
88715657
AL
2859 io_index = get_free_io_mem_idx();
2860 if (io_index == -1)
2861 return io_index;
33417e70
FB
2862 } else {
2863 if (io_index >= IO_MEM_NB_ENTRIES)
2864 return -1;
2865 }
b5ff1b31 2866
33417e70 2867 for(i = 0;i < 3; i++) {
4254fab8
BS
2868 if (!mem_read[i] || !mem_write[i])
2869 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2870 io_mem_read[io_index][i] = mem_read[i];
2871 io_mem_write[io_index][i] = mem_write[i];
2872 }
a4193c8a 2873 io_mem_opaque[io_index] = opaque;
4254fab8 2874 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2875}
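
/* Illustrative sketch (not part of exec.c): a minimal device using the
   registration interface above.  All names and addresses are hypothetical;
   the handler signatures match CPUReadMemoryFunc / CPUWriteMemoryFunc. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* a real device would decode 'addr' here */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readl, example_dev_readl, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writel, example_dev_writel, example_dev_writel,
};

static void example_dev_init(target_phys_addr_t base)
{
    int io_index = cpu_register_io_memory(0, example_dev_read,
                                          example_dev_write, NULL);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io_index, 0);
}
#endif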
61382a50 2876
88715657
AL
2877void cpu_unregister_io_memory(int io_table_address)
2878{
2879 int i;
2880 int io_index = io_table_address >> IO_MEM_SHIFT;
2881
2882 for (i=0;i < 3; i++) {
2883 io_mem_read[io_index][i] = unassigned_mem_read[i];
2884 io_mem_write[io_index][i] = unassigned_mem_write[i];
2885 }
2886 io_mem_opaque[io_index] = NULL;
2887 io_mem_used[io_index] = 0;
2888}
2889
8926b517
FB
2890CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2891{
2892 return io_mem_write[io_index >> IO_MEM_SHIFT];
2893}
2894
2895CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2896{
2897 return io_mem_read[io_index >> IO_MEM_SHIFT];
2898}
2899
e2eef170
PB
2900#endif /* !defined(CONFIG_USER_ONLY) */
2901
13eb76e0
FB
2902/* physical memory access (slow version, mainly for debug) */
2903#if defined(CONFIG_USER_ONLY)
5fafdf24 2904void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2905 int len, int is_write)
2906{
2907 int l, flags;
2908 target_ulong page;
53a5960a 2909 void * p;
13eb76e0
FB
2910
2911 while (len > 0) {
2912 page = addr & TARGET_PAGE_MASK;
2913 l = (page + TARGET_PAGE_SIZE) - addr;
2914 if (l > len)
2915 l = len;
2916 flags = page_get_flags(page);
2917 if (!(flags & PAGE_VALID))
2918 return;
2919 if (is_write) {
2920 if (!(flags & PAGE_WRITE))
2921 return;
579a97f7 2922 /* XXX: this code should not depend on lock_user */
72fb7daa 2923 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2924 /* FIXME - should this return an error rather than just fail? */
2925 return;
72fb7daa
AJ
2926 memcpy(p, buf, l);
2927 unlock_user(p, addr, l);
13eb76e0
FB
2928 } else {
2929 if (!(flags & PAGE_READ))
2930 return;
579a97f7 2931 /* XXX: this code should not depend on lock_user */
72fb7daa 2932 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2933 /* FIXME - should this return an error rather than just fail? */
2934 return;
72fb7daa 2935 memcpy(buf, p, l);
5b257578 2936 unlock_user(p, addr, 0);
13eb76e0
FB
2937 }
2938 len -= l;
2939 buf += l;
2940 addr += l;
2941 }
2942}
8df1cd07 2943
13eb76e0 2944#else
5fafdf24 2945void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2946 int len, int is_write)
2947{
2948 int l, io_index;
2949 uint8_t *ptr;
2950 uint32_t val;
2e12669a
FB
2951 target_phys_addr_t page;
2952 unsigned long pd;
92e873b9 2953 PhysPageDesc *p;
3b46e624 2954
13eb76e0
FB
2955 while (len > 0) {
2956 page = addr & TARGET_PAGE_MASK;
2957 l = (page + TARGET_PAGE_SIZE) - addr;
2958 if (l > len)
2959 l = len;
92e873b9 2960 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2961 if (!p) {
2962 pd = IO_MEM_UNASSIGNED;
2963 } else {
2964 pd = p->phys_offset;
2965 }
3b46e624 2966
13eb76e0 2967 if (is_write) {
3a7d929e 2968 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2969 target_phys_addr_t addr1 = addr;
13eb76e0 2970 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2971 if (p)
6c2934db 2972 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2973 /* XXX: could force cpu_single_env to NULL to avoid
2974 potential bugs */
6c2934db 2975 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2976 /* 32 bit write access */
c27004ec 2977 val = ldl_p(buf);
6c2934db 2978 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2979 l = 4;
6c2934db 2980 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2981 /* 16 bit write access */
c27004ec 2982 val = lduw_p(buf);
6c2934db 2983 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2984 l = 2;
2985 } else {
1c213d19 2986 /* 8 bit write access */
c27004ec 2987 val = ldub_p(buf);
6c2934db 2988 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2989 l = 1;
2990 }
2991 } else {
b448f2f3
FB
2992 unsigned long addr1;
2993 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2994 /* RAM case */
b448f2f3 2995 ptr = phys_ram_base + addr1;
13eb76e0 2996 memcpy(ptr, buf, l);
3a7d929e
FB
2997 if (!cpu_physical_memory_is_dirty(addr1)) {
2998 /* invalidate code */
2999 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3000 /* set dirty bit */
5fafdf24 3001 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3002 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3003 }
13eb76e0
FB
3004 }
3005 } else {
5fafdf24 3006 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3007 !(pd & IO_MEM_ROMD)) {
6c2934db 3008 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3009 /* I/O case */
3010 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3011 if (p)
6c2934db
AJ
3012 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3013 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3014 /* 32 bit read access */
6c2934db 3015 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3016 stl_p(buf, val);
13eb76e0 3017 l = 4;
6c2934db 3018 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3019 /* 16 bit read access */
6c2934db 3020 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3021 stw_p(buf, val);
13eb76e0
FB
3022 l = 2;
3023 } else {
1c213d19 3024 /* 8 bit read access */
6c2934db 3025 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3026 stb_p(buf, val);
13eb76e0
FB
3027 l = 1;
3028 }
3029 } else {
3030 /* RAM case */
5fafdf24 3031 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3032 (addr & ~TARGET_PAGE_MASK);
3033 memcpy(buf, ptr, l);
3034 }
3035 }
3036 len -= l;
3037 buf += l;
3038 addr += l;
3039 }
3040}
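
/* Illustrative sketch (not part of exec.c): a device model copying a DMA
   descriptor out of guest memory through the slow-path accessor above.
   The descriptor size and address are hypothetical. */
#if 0
static void example_dma_read_descriptor(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];

    /* is_write = 0: copy from guest-physical memory into 'desc' */
    cpu_physical_memory_rw(desc_addr, desc, sizeof(desc), 0);
}
#endif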
8df1cd07 3041
d0ecd2aa 3042/* used for ROM loading: can write in RAM and ROM */
5fafdf24 3043void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3044 const uint8_t *buf, int len)
3045{
3046 int l;
3047 uint8_t *ptr;
3048 target_phys_addr_t page;
3049 unsigned long pd;
3050 PhysPageDesc *p;
3b46e624 3051
d0ecd2aa
FB
3052 while (len > 0) {
3053 page = addr & TARGET_PAGE_MASK;
3054 l = (page + TARGET_PAGE_SIZE) - addr;
3055 if (l > len)
3056 l = len;
3057 p = phys_page_find(page >> TARGET_PAGE_BITS);
3058 if (!p) {
3059 pd = IO_MEM_UNASSIGNED;
3060 } else {
3061 pd = p->phys_offset;
3062 }
3b46e624 3063
d0ecd2aa 3064 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3065 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3066 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3067 /* do nothing */
3068 } else {
3069 unsigned long addr1;
3070 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3071 /* ROM/RAM case */
3072 ptr = phys_ram_base + addr1;
3073 memcpy(ptr, buf, l);
3074 }
3075 len -= l;
3076 buf += l;
3077 addr += l;
3078 }
3079}
3080
6d16c2f8
AL
3081typedef struct {
3082 void *buffer;
3083 target_phys_addr_t addr;
3084 target_phys_addr_t len;
3085} BounceBuffer;
3086
3087static BounceBuffer bounce;
3088
ba223c29
AL
3089typedef struct MapClient {
3090 void *opaque;
3091 void (*callback)(void *opaque);
3092 LIST_ENTRY(MapClient) link;
3093} MapClient;
3094
3095static LIST_HEAD(map_client_list, MapClient) map_client_list
3096 = LIST_HEAD_INITIALIZER(map_client_list);
3097
3098void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3099{
3100 MapClient *client = qemu_malloc(sizeof(*client));
3101
3102 client->opaque = opaque;
3103 client->callback = callback;
3104 LIST_INSERT_HEAD(&map_client_list, client, link);
3105 return client;
3106}
3107
3108void cpu_unregister_map_client(void *_client)
3109{
3110 MapClient *client = (MapClient *)_client;
3111
3112 LIST_REMOVE(client, link);
259cf68e 3113 qemu_free(client);
ba223c29
AL
3114}
3115
3116static void cpu_notify_map_clients(void)
3117{
3118 MapClient *client;
3119
3120 while (!LIST_EMPTY(&map_client_list)) {
3121 client = LIST_FIRST(&map_client_list);
3122 client->callback(client->opaque);
259cf68e 3123 cpu_unregister_map_client(client);
ba223c29
AL
3124 }
3125}
3126
6d16c2f8
AL
3127/* Map a physical memory region into a host virtual address.
3128 * May map a subset of the requested range, given by and returned in *plen.
3129 * May return NULL if resources needed to perform the mapping are exhausted.
3130 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3131 * Use cpu_register_map_client() to know when retrying the map operation is
3132 * likely to succeed.
6d16c2f8
AL
3133 */
3134void *cpu_physical_memory_map(target_phys_addr_t addr,
3135 target_phys_addr_t *plen,
3136 int is_write)
3137{
3138 target_phys_addr_t len = *plen;
3139 target_phys_addr_t done = 0;
3140 int l;
3141 uint8_t *ret = NULL;
3142 uint8_t *ptr;
3143 target_phys_addr_t page;
3144 unsigned long pd;
3145 PhysPageDesc *p;
3146 unsigned long addr1;
3147
3148 while (len > 0) {
3149 page = addr & TARGET_PAGE_MASK;
3150 l = (page + TARGET_PAGE_SIZE) - addr;
3151 if (l > len)
3152 l = len;
3153 p = phys_page_find(page >> TARGET_PAGE_BITS);
3154 if (!p) {
3155 pd = IO_MEM_UNASSIGNED;
3156 } else {
3157 pd = p->phys_offset;
3158 }
3159
3160 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3161 if (done || bounce.buffer) {
3162 break;
3163 }
3164 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3165 bounce.addr = addr;
3166 bounce.len = l;
3167 if (!is_write) {
3168 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3169 }
3170 ptr = bounce.buffer;
3171 } else {
3172 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3173 ptr = phys_ram_base + addr1;
3174 }
3175 if (!done) {
3176 ret = ptr;
3177 } else if (ret + done != ptr) {
3178 break;
3179 }
3180
3181 len -= l;
3182 addr += l;
3183 done += l;
3184 }
3185 *plen = done;
3186 return ret;
3187}
3188
3189/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3190 * Will also mark the memory as dirty if is_write == 1. access_len gives
3191 * the amount of memory that was actually read or written by the caller.
3192 */
3193void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3194 int is_write, target_phys_addr_t access_len)
3195{
3196 if (buffer != bounce.buffer) {
3197 if (is_write) {
3198 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3199 while (access_len) {
3200 unsigned l;
3201 l = TARGET_PAGE_SIZE;
3202 if (l > access_len)
3203 l = access_len;
3204 if (!cpu_physical_memory_is_dirty(addr1)) {
3205 /* invalidate code */
3206 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3207 /* set dirty bit */
3208 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3209 (0xff & ~CODE_DIRTY_FLAG);
3210 }
3211 addr1 += l;
3212 access_len -= l;
3213 }
3214 }
3215 return;
3216 }
3217 if (is_write) {
3218 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3219 }
3220 qemu_free(bounce.buffer);
3221 bounce.buffer = NULL;
ba223c29 3222 cpu_notify_map_clients();
6d16c2f8 3223}
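
/* Illustrative sketch (not part of exec.c): the zero-copy pattern the two
   functions above are meant for.  Error handling and the actual transfer
   are only hinted at; cpu_register_map_client() (above) can be used to be
   notified when a failed mapping is worth retrying. */
#if 0
static void example_dma_to_guest(target_phys_addr_t addr,
                                 target_phys_addr_t len,
                                 const uint8_t *src)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return; /* resources exhausted; retry via a map client */
    }
    memcpy(host, src, plen);                 /* may be shorter than 'len' */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif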
d0ecd2aa 3224
8df1cd07
FB
3225/* warning: addr must be aligned */
3226uint32_t ldl_phys(target_phys_addr_t addr)
3227{
3228 int io_index;
3229 uint8_t *ptr;
3230 uint32_t val;
3231 unsigned long pd;
3232 PhysPageDesc *p;
3233
3234 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3235 if (!p) {
3236 pd = IO_MEM_UNASSIGNED;
3237 } else {
3238 pd = p->phys_offset;
3239 }
3b46e624 3240
5fafdf24 3241 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3242 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3243 /* I/O case */
3244 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3245 if (p)
3246 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3247 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3248 } else {
3249 /* RAM case */
5fafdf24 3250 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3251 (addr & ~TARGET_PAGE_MASK);
3252 val = ldl_p(ptr);
3253 }
3254 return val;
3255}
3256
84b7b8e7
FB
3257/* warning: addr must be aligned */
3258uint64_t ldq_phys(target_phys_addr_t addr)
3259{
3260 int io_index;
3261 uint8_t *ptr;
3262 uint64_t val;
3263 unsigned long pd;
3264 PhysPageDesc *p;
3265
3266 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3267 if (!p) {
3268 pd = IO_MEM_UNASSIGNED;
3269 } else {
3270 pd = p->phys_offset;
3271 }
3b46e624 3272
2a4188a3
FB
3273 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3274 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3275 /* I/O case */
3276 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3277 if (p)
3278 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3279#ifdef TARGET_WORDS_BIGENDIAN
3280 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3281 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3282#else
3283 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3284 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3285#endif
3286 } else {
3287 /* RAM case */
5fafdf24 3288 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3289 (addr & ~TARGET_PAGE_MASK);
3290 val = ldq_p(ptr);
3291 }
3292 return val;
3293}
3294
aab33094
FB
3295/* XXX: optimize */
3296uint32_t ldub_phys(target_phys_addr_t addr)
3297{
3298 uint8_t val;
3299 cpu_physical_memory_read(addr, &val, 1);
3300 return val;
3301}
3302
3303/* XXX: optimize */
3304uint32_t lduw_phys(target_phys_addr_t addr)
3305{
3306 uint16_t val;
3307 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3308 return tswap16(val);
3309}
3310
8df1cd07
FB
3311/* warning: addr must be aligned. The ram page is not marked as dirty
3312 and the code inside is not invalidated. It is useful if the dirty
3313 bits are used to track modified PTEs */
3314void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3315{
3316 int io_index;
3317 uint8_t *ptr;
3318 unsigned long pd;
3319 PhysPageDesc *p;
3320
3321 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3322 if (!p) {
3323 pd = IO_MEM_UNASSIGNED;
3324 } else {
3325 pd = p->phys_offset;
3326 }
3b46e624 3327
3a7d929e 3328 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3329 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3330 if (p)
3331 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3332 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3333 } else {
74576198
AL
3334 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3335 ptr = phys_ram_base + addr1;
8df1cd07 3336 stl_p(ptr, val);
74576198
AL
3337
3338 if (unlikely(in_migration)) {
3339 if (!cpu_physical_memory_is_dirty(addr1)) {
3340 /* invalidate code */
3341 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3342 /* set dirty bit */
3343 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3344 (0xff & ~CODE_DIRTY_FLAG);
3345 }
3346 }
8df1cd07
FB
3347 }
3348}
3349
bc98a7ef
JM
3350void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3351{
3352 int io_index;
3353 uint8_t *ptr;
3354 unsigned long pd;
3355 PhysPageDesc *p;
3356
3357 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3358 if (!p) {
3359 pd = IO_MEM_UNASSIGNED;
3360 } else {
3361 pd = p->phys_offset;
3362 }
3b46e624 3363
bc98a7ef
JM
3364 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3365 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3366 if (p)
3367 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3368#ifdef TARGET_WORDS_BIGENDIAN
3369 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3370 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3371#else
3372 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3373 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3374#endif
3375 } else {
5fafdf24 3376 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3377 (addr & ~TARGET_PAGE_MASK);
3378 stq_p(ptr, val);
3379 }
3380}
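
/* Illustrative sketch (not part of exec.c): the typical caller of the
   *_notdirty stores is a target's page-table walker updating a guest PTE;
   the PTE layout and the 0x20 "accessed" bit are hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* update the PTE without marking its RAM page dirty, which matters
       when the dirty bits are being used to track modified PTEs (see the
       comment above stl_phys_notdirty) */
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif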
3381
8df1cd07 3382/* warning: addr must be aligned */
8df1cd07
FB
3383void stl_phys(target_phys_addr_t addr, uint32_t val)
3384{
3385 int io_index;
3386 uint8_t *ptr;
3387 unsigned long pd;
3388 PhysPageDesc *p;
3389
3390 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3391 if (!p) {
3392 pd = IO_MEM_UNASSIGNED;
3393 } else {
3394 pd = p->phys_offset;
3395 }
3b46e624 3396
3a7d929e 3397 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3398 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3399 if (p)
3400 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3401 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3402 } else {
3403 unsigned long addr1;
3404 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3405 /* RAM case */
3406 ptr = phys_ram_base + addr1;
3407 stl_p(ptr, val);
3a7d929e
FB
3408 if (!cpu_physical_memory_is_dirty(addr1)) {
3409 /* invalidate code */
3410 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3411 /* set dirty bit */
f23db169
FB
3412 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3413 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3414 }
8df1cd07
FB
3415 }
3416}
3417
aab33094
FB
3418/* XXX: optimize */
3419void stb_phys(target_phys_addr_t addr, uint32_t val)
3420{
3421 uint8_t v = val;
3422 cpu_physical_memory_write(addr, &v, 1);
3423}
3424
3425/* XXX: optimize */
3426void stw_phys(target_phys_addr_t addr, uint32_t val)
3427{
3428 uint16_t v = tswap16(val);
3429 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3430}
3431
3432/* XXX: optimize */
3433void stq_phys(target_phys_addr_t addr, uint64_t val)
3434{
3435 val = tswap64(val);
3436 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3437}
3438
13eb76e0
FB
3439#endif
3440
3441/* virtual memory access for debug */
5fafdf24 3442int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3443 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3444{
3445 int l;
9b3c35e0
JM
3446 target_phys_addr_t phys_addr;
3447 target_ulong page;
13eb76e0
FB
3448
3449 while (len > 0) {
3450 page = addr & TARGET_PAGE_MASK;
3451 phys_addr = cpu_get_phys_page_debug(env, page);
3452 /* if no physical page mapped, return an error */
3453 if (phys_addr == -1)
3454 return -1;
3455 l = (page + TARGET_PAGE_SIZE) - addr;
3456 if (l > len)
3457 l = len;
5fafdf24 3458 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3459 buf, l, is_write);
3460 len -= l;
3461 buf += l;
3462 addr += l;
3463 }
3464 return 0;
3465}
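/* Usage sketch (env and pc are placeholders for a real CPU state and guest
 * virtual address): this is the routine a debugger stub would use, since it
 * translates page by page with cpu_get_phys_page_debug() instead of going
 * through the TLB. */
#if 0
    uint8_t insn[4];
    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
        /* the guest virtual address is not mapped */
    }
#endif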
3466
3467/* in deterministic execution mode, an instruction that performs device
3468 I/O must be the last one in its TB */
3469void cpu_io_recompile(CPUState *env, void *retaddr)
3470{
3471 TranslationBlock *tb;
3472 uint32_t n, cflags;
3473 target_ulong pc, cs_base;
3474 uint64_t flags;
3475
3476 tb = tb_find_pc((unsigned long)retaddr);
3477 if (!tb) {
3478 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3479 retaddr);
3480 }
3481 n = env->icount_decr.u16.low + tb->icount;
3482 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3483 /* Calculate how many instructions had been executed before the fault
bf20dc07 3484 occurred. */
3485 n = n - env->icount_decr.u16.low;
3486 /* Generate a new TB ending on the I/O insn. */
3487 n++;
3488 /* On MIPS and SH, delay slot instructions can only be restarted if
3489 they were already the first instruction in the TB. If this is not
bf20dc07 3490 the first instruction in a TB then re-execute the preceding
3491 branch. */
3492#if defined(TARGET_MIPS)
3493 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3494 env->active_tc.PC -= 4;
3495 env->icount_decr.u16.low++;
3496 env->hflags &= ~MIPS_HFLAG_BMASK;
3497 }
3498#elif defined(TARGET_SH4)
3499 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3500 && n > 1) {
3501 env->pc -= 2;
3502 env->icount_decr.u16.low++;
3503 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3504 }
3505#endif
3506 /* This should never happen. */
3507 if (n > CF_COUNT_MASK)
3508 cpu_abort(env, "TB too big during recompile");
3509
3510 cflags = n | CF_LAST_IO;
3511 pc = tb->pc;
3512 cs_base = tb->cs_base;
3513 flags = tb->flags;
3514 tb_phys_invalidate(tb, -1);
3515 /* FIXME: In theory this could raise an exception. In practice
3516 we have already translated the block once so it's probably ok. */
3517 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3518 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3519 the first in the TB) then we end up generating a whole new TB and
3520 repeating the fault, which is horribly inefficient.
3521 Better would be to execute just this insn uncached, or generate a
3522 second new TB. */
3523 cpu_resume_from_signal(env, NULL);
3524}
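/* Sketch of the caller side (simplified, assuming the icount helpers
 * use_icount and can_do_io() from elsewhere in the tree): when deterministic
 * execution is enabled and an MMIO access is not the last instruction of its
 * TB, the softmmu I/O path bails out into cpu_io_recompile(), which rebuilds
 * the TB with CF_LAST_IO so the I/O insn ends the block on the retry. */
#if 0
    if (use_icount && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);       /* does not return */
    }
#endif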
3525
3526void dump_exec_info(FILE *f,
3527 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3528{
3529 int i, target_code_size, max_target_code_size;
3530 int direct_jmp_count, direct_jmp2_count, cross_page;
3531 TranslationBlock *tb;
3b46e624 3532
3533 target_code_size = 0;
3534 max_target_code_size = 0;
3535 cross_page = 0;
3536 direct_jmp_count = 0;
3537 direct_jmp2_count = 0;
3538 for(i = 0; i < nb_tbs; i++) {
3539 tb = &tbs[i];
3540 target_code_size += tb->size;
3541 if (tb->size > max_target_code_size)
3542 max_target_code_size = tb->size;
3543 if (tb->page_addr[1] != -1)
3544 cross_page++;
3545 if (tb->tb_next_offset[0] != 0xffff) {
3546 direct_jmp_count++;
3547 if (tb->tb_next_offset[1] != 0xffff) {
3548 direct_jmp2_count++;
3549 }
3550 }
3551 }
3552 /* XXX: avoid using doubles? */
57fec1fe 3553 cpu_fprintf(f, "Translation buffer state:\n");
3554 cpu_fprintf(f, "gen code size %ld/%ld\n",
3555 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3556 cpu_fprintf(f, "TB count %d/%d\n",
3557 nb_tbs, code_gen_max_blocks);
5fafdf24 3558 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3559 nb_tbs ? target_code_size / nb_tbs : 0,
3560 max_target_code_size);
5fafdf24 3561 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3562 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3563 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3564 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3565 cross_page,
3566 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3567 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3568 direct_jmp_count,
3569 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3570 direct_jmp2_count,
3571 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3572 cpu_fprintf(f, "\nStatistics:\n");
3573 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3574 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3575 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3576 tcg_dump_info(f, cpu_fprintf);
3577}
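/* Usage sketch: the cpu_fprintf callback lets the same code print either to
 * the monitor or to a plain stdio stream; dumping to stderr is simply: */
#if 0
    dump_exec_info(stderr, fprintf);
#endif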
3578
5fafdf24 3579#if !defined(CONFIG_USER_ONLY)
3580
3581#define MMUSUFFIX _cmmu
3582#define GETPC() NULL
3583#define env cpu_single_env
b769d8fe 3584#define SOFTMMU_CODE_ACCESS
3585
3586#define SHIFT 0
3587#include "softmmu_template.h"
3588
3589#define SHIFT 1
3590#include "softmmu_template.h"
3591
3592#define SHIFT 2
3593#include "softmmu_template.h"
3594
3595#define SHIFT 3
3596#include "softmmu_template.h"
3597
3598#undef env
3599
3600#endif
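/* Note on the include pattern above (illustrative): softmmu_template.h acts
 * as a poor man's template.  Each inclusion expands the access helpers for
 * one size selected by SHIFT (0 = byte, 1 = word, 2 = long, 3 = quad), and
 * MMUSUFFIX/SOFTMMU_CODE_ACCESS make these the _cmmu (code fetch) flavour
 * used by the translators.  Roughly, the four includes end up providing
 * prototypes along the lines of: */
#if 0
uint8_t  __ldb_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT == 0 */
uint16_t __ldw_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT == 1 */
uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT == 2 */
uint64_t __ldq_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT == 3 */
#endif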