/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
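
/* A page index is resolved in two levels: index >> L2_BITS selects an entry
   in l1_map, and index & (L2_SIZE - 1) selects the descriptor within that
   level-2 block; phys_page_find_alloc() uses the same split for l1_phys_map,
   with one extra indirection when the physical address space is wider than
   32 bits. */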

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
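
/* A subpage_t backs a target page whose contents are not uniform: each byte
   offset within the page (SUBPAGE_IDX) can carry its own read/write handlers,
   so several memory regions or devices can share a single target page. */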

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
17e2377a
PB
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
292 return NULL;
293#endif
434929bf
AL
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
54936004
FB
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a 307#if defined(CONFIG_USER_ONLY)
17e2377a
PB
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 312 *lp = p;
fb1c2cd7
AJ
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
17e2377a
PB
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
318 }
319#else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322#endif
54936004
FB
323 }
324 return p + (index & (L2_SIZE - 1));
325}
326
00f82b8a 327static inline PageDesc *page_find(target_ulong index)
54936004 328{
434929bf
AL
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
54936004 333
434929bf 334 p = *lp;
54936004
FB
335 if (!p)
336 return 0;
fd6ce8f6
FB
337 return p + (index & (L2_SIZE - 1));
338}
339
108c49b8 340static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 341{
108c49b8 342 void **lp, **p;
e3f4e2a4 343 PhysPageDesc *pd;
92e873b9 344
108c49b8
FB
345 p = (void **)l1_phys_map;
346#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347
348#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350#endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
108c49b8
FB
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
360 }
361#endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
363 pd = *lp;
364 if (!pd) {
365 int i;
108c49b8
FB
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
e3f4e2a4
PB
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
67c4d23c 371 for (i = 0; i < L2_SIZE; i++) {
e3f4e2a4 372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
67c4d23c
PB
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
374 }
92e873b9 375 }
e3f4e2a4 376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
377}
378
108c49b8 379static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 380{
108c49b8 381 return phys_page_find_alloc(index, 0);
92e873b9
FB
382}
383
9fa3e853 384#if !defined(CONFIG_USER_ONLY)
6a00d601 385static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 386static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 387 target_ulong vaddr);
c8a706fe
PB
388#define mmap_lock() do { } while(0)
389#define mmap_unlock() do { } while(0)
9fa3e853 390#endif
fd6ce8f6 391
4369415f
FB
392#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393
394#if defined(CONFIG_USER_ONLY)
395/* Currently it is not recommended to allocate big chunks of data in
396   user mode. It will change when a dedicated libc is used. */
397#define USE_STATIC_CODE_GEN_BUFFER
398#endif
399
400#ifdef USE_STATIC_CODE_GEN_BUFFER
401static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402#endif
403
8fcd3692 404static void code_gen_alloc(unsigned long tb_size)
26a5f13b 405{
4369415f
FB
406#ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410#else
26a5f13b
FB
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
4369415f
FB
413#if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416#else
26a5f13b 417 /* XXX: needs adjustments */
174a9a1f 418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 419#endif
26a5f13b
FB
420 }
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425#if defined(__linux__)
426 {
427 int flags;
141ac468
BS
428 void *start = NULL;
429
26a5f13b
FB
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431#if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
436#elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 442#elif defined(__arm__)
63d41246 443 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 448#endif
141ac468
BS
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
455 }
456 }
06e67a82
AL
457#elif defined(__FreeBSD__)
458 {
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462#if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470#endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
477 }
478 }
26a5f13b
FB
479#else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b
FB
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482#endif
4369415f 483#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489}
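/* code_gen_buffer_max_size reserves room for one maximum-size TB at the end
   of the buffer: tb_alloc() refuses new blocks past that threshold, so a
   translation that starts below it can always be completed without
   overflowing code_gen_buffer. */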
490
491/* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494void cpu_exec_init_all(unsigned long tb_size)
495{
26a5f13b
FB
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
4369415f 499 page_init();
e2eef170 500#if !defined(CONFIG_USER_ONLY)
26a5f13b 501 io_mem_init();
e2eef170 502#endif
26a5f13b
FB
503}
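/* Typical start-up order (sketch): call cpu_exec_init_all(0) once, then
   create CPUs with cpu_init()/cpu_exec_init(); tb_size == 0 selects the
   default translation buffer size. */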
504
9656f324
PB
505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506
507#define CPU_COMMON_SAVE_VERSION 1
508
509static void cpu_common_save(QEMUFile *f, void *opaque)
510{
511 CPUState *env = opaque;
512
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
515}
516
517static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518{
519 CPUState *env = opaque;
520
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
523
524 qemu_get_be32s(f, &env->halted);
75f482ae 525 qemu_get_be32s(f, &env->interrupt_request);
9656f324
PB
526 tlb_flush(env, 1);
527
528 return 0;
529}
530#endif
531
6a00d601 532void cpu_exec_init(CPUState *env)
fd6ce8f6 533{
6a00d601
FB
534 CPUState **penv;
535 int cpu_index;
536
6a00d601
FB
537 env->next_cpu = NULL;
538 penv = &first_cpu;
539 cpu_index = 0;
540 while (*penv != NULL) {
541 penv = (CPUState **)&(*penv)->next_cpu;
542 cpu_index++;
543 }
544 env->cpu_index = cpu_index;
c0ce998e
AL
545 TAILQ_INIT(&env->breakpoints);
546 TAILQ_INIT(&env->watchpoints);
6a00d601 547 *penv = env;
b3c7724c 548#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
549 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
550 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
551 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
552 cpu_save, cpu_load, env);
553#endif
fd6ce8f6
FB
554}
555
9fa3e853
FB
556static inline void invalidate_page_bitmap(PageDesc *p)
557{
558 if (p->code_bitmap) {
59817ccb 559 qemu_free(p->code_bitmap);
9fa3e853
FB
560 p->code_bitmap = NULL;
561 }
562 p->code_write_count = 0;
563}
564
fd6ce8f6
FB
565/* set to NULL all the 'first_tb' fields in all PageDescs */
566static void page_flush_tb(void)
567{
568 int i, j;
569 PageDesc *p;
570
571 for(i = 0; i < L1_SIZE; i++) {
572 p = l1_map[i];
573 if (p) {
9fa3e853
FB
574 for(j = 0; j < L2_SIZE; j++) {
575 p->first_tb = NULL;
576 invalidate_page_bitmap(p);
577 p++;
578 }
fd6ce8f6
FB
579 }
580 }
581}
582
583/* flush all the translation blocks */
d4e8164f 584/* XXX: tb_flush is currently not thread safe */
6a00d601 585void tb_flush(CPUState *env1)
fd6ce8f6 586{
6a00d601 587 CPUState *env;
0124311e 588#if defined(DEBUG_FLUSH)
ab3d1727
BS
589 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
590 (unsigned long)(code_gen_ptr - code_gen_buffer),
591 nb_tbs, nb_tbs > 0 ?
592 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 593#endif
26a5f13b 594 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
595 cpu_abort(env1, "Internal error: code buffer overflow\n");
596
fd6ce8f6 597 nb_tbs = 0;
3b46e624 598
6a00d601
FB
599 for(env = first_cpu; env != NULL; env = env->next_cpu) {
600 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
601 }
9fa3e853 602
8a8a608f 603 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 604 page_flush_tb();
9fa3e853 605
fd6ce8f6 606 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
607 /* XXX: flush processor icache at this point if cache flush is
608 expensive */
e3db7226 609 tb_flush_count++;
fd6ce8f6
FB
610}
611
612#ifdef DEBUG_TB_CHECK
613
bc98a7ef 614static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
615{
616 TranslationBlock *tb;
617 int i;
618 address &= TARGET_PAGE_MASK;
99773bd4
PB
619 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
620 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
621 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
622 address >= tb->pc + tb->size)) {
623 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 624 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
625 }
626 }
627 }
628}
629
630/* verify that all the pages have correct rights for code */
631static void tb_page_check(void)
632{
633 TranslationBlock *tb;
634 int i, flags1, flags2;
3b46e624 635
99773bd4
PB
636 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
637 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
638 flags1 = page_get_flags(tb->pc);
639 flags2 = page_get_flags(tb->pc + tb->size - 1);
640 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
641 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 642 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
643 }
644 }
645 }
646}
647
bdaf78e0 648static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
649{
650 TranslationBlock *tb1;
651 unsigned int n1;
652
653 /* suppress any remaining jumps to this TB */
654 tb1 = tb->jmp_first;
655 for(;;) {
656 n1 = (long)tb1 & 3;
657 tb1 = (TranslationBlock *)((long)tb1 & ~3);
658 if (n1 == 2)
659 break;
660 tb1 = tb1->jmp_next[n1];
661 }
662 /* check end of list */
663 if (tb1 != tb) {
664 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
665 }
666}
667
fd6ce8f6
FB
668#endif
669
670/* invalidate one TB */
671static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
672 int next_offset)
673{
674 TranslationBlock *tb1;
675 for(;;) {
676 tb1 = *ptb;
677 if (tb1 == tb) {
678 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
679 break;
680 }
681 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
682 }
683}
684
9fa3e853
FB
685static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
686{
687 TranslationBlock *tb1;
688 unsigned int n1;
689
690 for(;;) {
691 tb1 = *ptb;
692 n1 = (long)tb1 & 3;
693 tb1 = (TranslationBlock *)((long)tb1 & ~3);
694 if (tb1 == tb) {
695 *ptb = tb1->page_next[n1];
696 break;
697 }
698 ptb = &tb1->page_next[n1];
699 }
700}
701
d4e8164f
FB
702static inline void tb_jmp_remove(TranslationBlock *tb, int n)
703{
704 TranslationBlock *tb1, **ptb;
705 unsigned int n1;
706
707 ptb = &tb->jmp_next[n];
708 tb1 = *ptb;
709 if (tb1) {
710 /* find tb(n) in circular list */
711 for(;;) {
712 tb1 = *ptb;
713 n1 = (long)tb1 & 3;
714 tb1 = (TranslationBlock *)((long)tb1 & ~3);
715 if (n1 == n && tb1 == tb)
716 break;
717 if (n1 == 2) {
718 ptb = &tb1->jmp_first;
719 } else {
720 ptb = &tb1->jmp_next[n1];
721 }
722 }
723 /* now we can suppress tb(n) from the list */
724 *ptb = tb->jmp_next[n];
725
726 tb->jmp_next[n] = NULL;
727 }
728}
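/* The per-page TB lists and the jump lists above use tagged pointers: the
   low two bits of each link record which of the TB's two slots (0 or 1) the
   entry belongs to, and in the circular jump list the value 2 marks the list
   head; masking with ~3 recovers the real TranslationBlock pointer. */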
729
730/* reset the jump entry 'n' of a TB so that it is not chained to
731 another TB */
732static inline void tb_reset_jump(TranslationBlock *tb, int n)
733{
734 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
735}
736
2e70f6ef 737void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 738{
6a00d601 739 CPUState *env;
8a40a180 740 PageDesc *p;
d4e8164f 741 unsigned int h, n1;
00f82b8a 742 target_phys_addr_t phys_pc;
8a40a180 743 TranslationBlock *tb1, *tb2;
3b46e624 744
8a40a180
FB
745 /* remove the TB from the hash list */
746 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
747 h = tb_phys_hash_func(phys_pc);
5fafdf24 748 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
749 offsetof(TranslationBlock, phys_hash_next));
750
751 /* remove the TB from the page list */
752 if (tb->page_addr[0] != page_addr) {
753 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
754 tb_page_remove(&p->first_tb, tb);
755 invalidate_page_bitmap(p);
756 }
757 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
758 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
759 tb_page_remove(&p->first_tb, tb);
760 invalidate_page_bitmap(p);
761 }
762
36bdbe54 763 tb_invalidated_flag = 1;
59817ccb 764
fd6ce8f6 765 /* remove the TB from the hash list */
8a40a180 766 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
767 for(env = first_cpu; env != NULL; env = env->next_cpu) {
768 if (env->tb_jmp_cache[h] == tb)
769 env->tb_jmp_cache[h] = NULL;
770 }
d4e8164f
FB
771
772 /* suppress this TB from the two jump lists */
773 tb_jmp_remove(tb, 0);
774 tb_jmp_remove(tb, 1);
775
776 /* suppress any remaining jumps to this TB */
777 tb1 = tb->jmp_first;
778 for(;;) {
779 n1 = (long)tb1 & 3;
780 if (n1 == 2)
781 break;
782 tb1 = (TranslationBlock *)((long)tb1 & ~3);
783 tb2 = tb1->jmp_next[n1];
784 tb_reset_jump(tb1, n1);
785 tb1->jmp_next[n1] = NULL;
786 tb1 = tb2;
787 }
788 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 789
e3db7226 790 tb_phys_invalidate_count++;
9fa3e853
FB
791}
792
793static inline void set_bits(uint8_t *tab, int start, int len)
794{
795 int end, mask, end1;
796
797 end = start + len;
798 tab += start >> 3;
799 mask = 0xff << (start & 7);
800 if ((start & ~7) == (end & ~7)) {
801 if (start < end) {
802 mask &= ~(0xff << (end & 7));
803 *tab |= mask;
804 }
805 } else {
806 *tab++ |= mask;
807 start = (start + 8) & ~7;
808 end1 = end & ~7;
809 while (start < end1) {
810 *tab++ = 0xff;
811 start += 8;
812 }
813 if (start < end) {
814 mask = ~(0xff << (end & 7));
815 *tab |= mask;
816 }
817 }
818}
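/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03. */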
819
820static void build_page_bitmap(PageDesc *p)
821{
822 int n, tb_start, tb_end;
823 TranslationBlock *tb;
3b46e624 824
b2a7081a 825 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
826
827 tb = p->first_tb;
828 while (tb != NULL) {
829 n = (long)tb & 3;
830 tb = (TranslationBlock *)((long)tb & ~3);
831 /* NOTE: this is subtle as a TB may span two physical pages */
832 if (n == 0) {
833 /* NOTE: tb_end may be after the end of the page, but
834 it is not a problem */
835 tb_start = tb->pc & ~TARGET_PAGE_MASK;
836 tb_end = tb_start + tb->size;
837 if (tb_end > TARGET_PAGE_SIZE)
838 tb_end = TARGET_PAGE_SIZE;
839 } else {
840 tb_start = 0;
841 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
842 }
843 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
844 tb = tb->page_next[n];
845 }
846}
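/* The resulting bitmap has one bit per byte of the page; a set bit means the
   byte is covered by translated code, so tb_invalidate_phys_page_fast() can
   skip writes that do not touch any TB. */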
847
2e70f6ef
PB
848TranslationBlock *tb_gen_code(CPUState *env,
849 target_ulong pc, target_ulong cs_base,
850 int flags, int cflags)
d720b93d
FB
851{
852 TranslationBlock *tb;
853 uint8_t *tc_ptr;
854 target_ulong phys_pc, phys_page2, virt_page2;
855 int code_gen_size;
856
c27004ec
FB
857 phys_pc = get_phys_addr_code(env, pc);
858 tb = tb_alloc(pc);
d720b93d
FB
859 if (!tb) {
860 /* flush must be done */
861 tb_flush(env);
862 /* cannot fail at this point */
c27004ec 863 tb = tb_alloc(pc);
2e70f6ef
PB
864 /* Don't forget to invalidate previous TB info. */
865 tb_invalidated_flag = 1;
d720b93d
FB
866 }
867 tc_ptr = code_gen_ptr;
868 tb->tc_ptr = tc_ptr;
869 tb->cs_base = cs_base;
870 tb->flags = flags;
871 tb->cflags = cflags;
d07bde88 872 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 873 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 874
d720b93d 875 /* check next page if needed */
c27004ec 876 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 877 phys_page2 = -1;
c27004ec 878 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
879 phys_page2 = get_phys_addr_code(env, virt_page2);
880 }
881 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 882 return tb;
d720b93d 883}
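/* tb_gen_code records both physical pages of a TB that straddles a page
   boundary (phys_page2 != -1), so invalidating either page also invalidates
   the TB. */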
3b46e624 884
885/* invalidate all TBs which intersect with the target physical page
886   starting in range [start;end[. NOTE: start and end must refer to
887   the same physical page. 'is_cpu_write_access' should be true if called
888   from a real cpu write access: the virtual CPU will exit the current
889   TB if code is modified inside this TB. */
00f82b8a 890void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
891 int is_cpu_write_access)
892{
6b917547 893 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 894 CPUState *env = cpu_single_env;
9fa3e853 895 target_ulong tb_start, tb_end;
6b917547
AL
896 PageDesc *p;
897 int n;
898#ifdef TARGET_HAS_PRECISE_SMC
899 int current_tb_not_found = is_cpu_write_access;
900 TranslationBlock *current_tb = NULL;
901 int current_tb_modified = 0;
902 target_ulong current_pc = 0;
903 target_ulong current_cs_base = 0;
904 int current_flags = 0;
905#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
906
907 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 908 if (!p)
9fa3e853 909 return;
5fafdf24 910 if (!p->code_bitmap &&
d720b93d
FB
911 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
912 is_cpu_write_access) {
9fa3e853
FB
913 /* build code bitmap */
914 build_page_bitmap(p);
915 }
916
917 /* we remove all the TBs in the range [start, end[ */
918 /* XXX: see if in some cases it could be faster to invalidate all the code */
919 tb = p->first_tb;
920 while (tb != NULL) {
921 n = (long)tb & 3;
922 tb = (TranslationBlock *)((long)tb & ~3);
923 tb_next = tb->page_next[n];
924 /* NOTE: this is subtle as a TB may span two physical pages */
925 if (n == 0) {
926 /* NOTE: tb_end may be after the end of the page, but
927 it is not a problem */
928 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
929 tb_end = tb_start + tb->size;
930 } else {
931 tb_start = tb->page_addr[1];
932 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
933 }
934 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
935#ifdef TARGET_HAS_PRECISE_SMC
936 if (current_tb_not_found) {
937 current_tb_not_found = 0;
938 current_tb = NULL;
2e70f6ef 939 if (env->mem_io_pc) {
d720b93d 940 /* now we have a real cpu fault */
2e70f6ef 941 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
942 }
943 }
944 if (current_tb == tb &&
2e70f6ef 945 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
3b46e624 951
d720b93d 952 current_tb_modified = 1;
5fafdf24 953 cpu_restore_state(current_tb, env,
2e70f6ef 954 env->mem_io_pc, NULL);
6b917547
AL
955 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
956 &current_flags);
d720b93d
FB
957 }
958#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
959 /* we need to do that to handle the case where a signal
960 occurs while doing tb_phys_invalidate() */
961 saved_tb = NULL;
962 if (env) {
963 saved_tb = env->current_tb;
964 env->current_tb = NULL;
965 }
9fa3e853 966 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
967 if (env) {
968 env->current_tb = saved_tb;
969 if (env->interrupt_request && env->current_tb)
970 cpu_interrupt(env, env->interrupt_request);
971 }
9fa3e853
FB
972 }
973 tb = tb_next;
974 }
975#if !defined(CONFIG_USER_ONLY)
976 /* if no code remaining, no need to continue to use slow writes */
977 if (!p->first_tb) {
978 invalidate_page_bitmap(p);
d720b93d 979 if (is_cpu_write_access) {
2e70f6ef 980 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
981 }
982 }
983#endif
984#ifdef TARGET_HAS_PRECISE_SMC
985 if (current_tb_modified) {
986 /* we generate a block containing just the instruction
987 modifying the memory. It will ensure that it cannot modify
988 itself */
ea1c1802 989 env->current_tb = NULL;
2e70f6ef 990 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 991 cpu_resume_from_signal(env, NULL);
9fa3e853 992 }
fd6ce8f6 993#endif
9fa3e853 994}
fd6ce8f6 995
9fa3e853 996/* len must be <= 8 and start must be a multiple of len */
00f82b8a 997static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
998{
999 PageDesc *p;
1000 int offset, b;
59817ccb 1001#if 0
a4193c8a 1002 if (1) {
93fcfe39
AL
1003 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1004 cpu_single_env->mem_io_vaddr, len,
1005 cpu_single_env->eip,
1006 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1007 }
1008#endif
9fa3e853 1009 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1010 if (!p)
9fa3e853
FB
1011 return;
1012 if (p->code_bitmap) {
1013 offset = start & ~TARGET_PAGE_MASK;
1014 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1015 if (b & ((1 << len) - 1))
1016 goto do_invalidate;
1017 } else {
1018 do_invalidate:
d720b93d 1019 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1020 }
1021}
1022
9fa3e853 1023#if !defined(CONFIG_SOFTMMU)
00f82b8a 1024static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1025 unsigned long pc, void *puc)
9fa3e853 1026{
6b917547 1027 TranslationBlock *tb;
9fa3e853 1028 PageDesc *p;
6b917547 1029 int n;
d720b93d 1030#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1031 TranslationBlock *current_tb = NULL;
d720b93d 1032 CPUState *env = cpu_single_env;
6b917547
AL
1033 int current_tb_modified = 0;
1034 target_ulong current_pc = 0;
1035 target_ulong current_cs_base = 0;
1036 int current_flags = 0;
d720b93d 1037#endif
9fa3e853
FB
1038
1039 addr &= TARGET_PAGE_MASK;
1040 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1041 if (!p)
9fa3e853
FB
1042 return;
1043 tb = p->first_tb;
d720b93d
FB
1044#ifdef TARGET_HAS_PRECISE_SMC
1045 if (tb && pc != 0) {
1046 current_tb = tb_find_pc(pc);
1047 }
1048#endif
9fa3e853
FB
1049 while (tb != NULL) {
1050 n = (long)tb & 3;
1051 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1052#ifdef TARGET_HAS_PRECISE_SMC
1053 if (current_tb == tb &&
2e70f6ef 1054 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1055 /* If we are modifying the current TB, we must stop
1056 its execution. We could be more precise by checking
1057 that the modification is after the current PC, but it
1058 would require a specialized function to partially
1059 restore the CPU state */
3b46e624 1060
d720b93d
FB
1061 current_tb_modified = 1;
1062 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1063 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1064 &current_flags);
d720b93d
FB
1065 }
1066#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1067 tb_phys_invalidate(tb, addr);
1068 tb = tb->page_next[n];
1069 }
fd6ce8f6 1070 p->first_tb = NULL;
d720b93d
FB
1071#ifdef TARGET_HAS_PRECISE_SMC
1072 if (current_tb_modified) {
1073 /* we generate a block containing just the instruction
1074 modifying the memory. It will ensure that it cannot modify
1075 itself */
ea1c1802 1076 env->current_tb = NULL;
2e70f6ef 1077 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1078 cpu_resume_from_signal(env, puc);
1079 }
1080#endif
fd6ce8f6 1081}
9fa3e853 1082#endif
fd6ce8f6
FB
1083
1084/* add the tb in the target page and protect it if necessary */
5fafdf24 1085static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1086 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1087{
1088 PageDesc *p;
9fa3e853
FB
1089 TranslationBlock *last_first_tb;
1090
1091 tb->page_addr[n] = page_addr;
3a7d929e 1092 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1093 tb->page_next[n] = p->first_tb;
1094 last_first_tb = p->first_tb;
1095 p->first_tb = (TranslationBlock *)((long)tb | n);
1096 invalidate_page_bitmap(p);
fd6ce8f6 1097
107db443 1098#if defined(TARGET_HAS_SMC) || 1
d720b93d 1099
9fa3e853 1100#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1101 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1102 target_ulong addr;
1103 PageDesc *p2;
9fa3e853
FB
1104 int prot;
1105
fd6ce8f6
FB
1106 /* force the host page as non writable (writes will have a
1107 page fault + mprotect overhead) */
53a5960a 1108 page_addr &= qemu_host_page_mask;
fd6ce8f6 1109 prot = 0;
53a5960a
PB
1110 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1111 addr += TARGET_PAGE_SIZE) {
1112
1113 p2 = page_find (addr >> TARGET_PAGE_BITS);
1114 if (!p2)
1115 continue;
1116 prot |= p2->flags;
1117 p2->flags &= ~PAGE_WRITE;
1118 page_get_flags(addr);
1119 }
5fafdf24 1120 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1121 (prot & PAGE_BITS) & ~PAGE_WRITE);
1122#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1123 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1124 page_addr);
fd6ce8f6 1125#endif
fd6ce8f6 1126 }
9fa3e853
FB
1127#else
1128 /* if some code is already present, then the pages are already
1129 protected. So we handle the case where only the first TB is
1130 allocated in a physical page */
1131 if (!last_first_tb) {
6a00d601 1132 tlb_protect_code(page_addr);
9fa3e853
FB
1133 }
1134#endif
d720b93d
FB
1135
1136#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1137}
1138
1139/* Allocate a new translation block. Flush the translation buffer if
1140 too many translation blocks or too much generated code. */
c27004ec 1141TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1142{
1143 TranslationBlock *tb;
fd6ce8f6 1144
26a5f13b
FB
1145 if (nb_tbs >= code_gen_max_blocks ||
1146 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1147 return NULL;
fd6ce8f6
FB
1148 tb = &tbs[nb_tbs++];
1149 tb->pc = pc;
b448f2f3 1150 tb->cflags = 0;
d4e8164f
FB
1151 return tb;
1152}
1153
2e70f6ef
PB
1154void tb_free(TranslationBlock *tb)
1155{
1156    /* In practice this is mostly used for single-use temporary TBs.
1157       Ignore the hard cases and just back up if this TB happens to
1158       be the last one generated. */
1159 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1160 code_gen_ptr = tb->tc_ptr;
1161 nb_tbs--;
1162 }
1163}
1164
9fa3e853
FB
1165/* add a new TB and link it to the physical page tables. phys_page2 is
1166 (-1) to indicate that only one page contains the TB. */
5fafdf24 1167void tb_link_phys(TranslationBlock *tb,
9fa3e853 1168 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1169{
9fa3e853
FB
1170 unsigned int h;
1171 TranslationBlock **ptb;
1172
c8a706fe
PB
1173 /* Grab the mmap lock to stop another thread invalidating this TB
1174 before we are done. */
1175 mmap_lock();
9fa3e853
FB
1176 /* add in the physical hash table */
1177 h = tb_phys_hash_func(phys_pc);
1178 ptb = &tb_phys_hash[h];
1179 tb->phys_hash_next = *ptb;
1180 *ptb = tb;
fd6ce8f6
FB
1181
1182 /* add in the page list */
9fa3e853
FB
1183 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1184 if (phys_page2 != -1)
1185 tb_alloc_page(tb, 1, phys_page2);
1186 else
1187 tb->page_addr[1] = -1;
9fa3e853 1188
d4e8164f
FB
1189 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1190 tb->jmp_next[0] = NULL;
1191 tb->jmp_next[1] = NULL;
1192
1193 /* init original jump addresses */
1194 if (tb->tb_next_offset[0] != 0xffff)
1195 tb_reset_jump(tb, 0);
1196 if (tb->tb_next_offset[1] != 0xffff)
1197 tb_reset_jump(tb, 1);
8a40a180
FB
1198
1199#ifdef DEBUG_TB_CHECK
1200 tb_page_check();
1201#endif
c8a706fe 1202 mmap_unlock();
fd6ce8f6
FB
1203}
1204
9fa3e853
FB
1205/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1206 tb[1].tc_ptr. Return NULL if not found */
1207TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1208{
9fa3e853
FB
1209 int m_min, m_max, m;
1210 unsigned long v;
1211 TranslationBlock *tb;
a513fe19
FB
1212
1213 if (nb_tbs <= 0)
1214 return NULL;
1215 if (tc_ptr < (unsigned long)code_gen_buffer ||
1216 tc_ptr >= (unsigned long)code_gen_ptr)
1217 return NULL;
1218 /* binary search (cf Knuth) */
1219 m_min = 0;
1220 m_max = nb_tbs - 1;
1221 while (m_min <= m_max) {
1222 m = (m_min + m_max) >> 1;
1223 tb = &tbs[m];
1224 v = (unsigned long)tb->tc_ptr;
1225 if (v == tc_ptr)
1226 return tb;
1227 else if (tc_ptr < v) {
1228 m_max = m - 1;
1229 } else {
1230 m_min = m + 1;
1231 }
5fafdf24 1232 }
a513fe19
FB
1233 return &tbs[m_max];
1234}
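/* The binary search works because TBs are handed out sequentially from
   code_gen_buffer, so tc_ptr is monotonically increasing across tbs[]. */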
7501267e 1235
ea041c0e
FB
1236static void tb_reset_jump_recursive(TranslationBlock *tb);
1237
1238static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1239{
1240 TranslationBlock *tb1, *tb_next, **ptb;
1241 unsigned int n1;
1242
1243 tb1 = tb->jmp_next[n];
1244 if (tb1 != NULL) {
1245 /* find head of list */
1246 for(;;) {
1247 n1 = (long)tb1 & 3;
1248 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1249 if (n1 == 2)
1250 break;
1251 tb1 = tb1->jmp_next[n1];
1252 }
1253 /* we are now sure now that tb jumps to tb1 */
1254 tb_next = tb1;
1255
1256 /* remove tb from the jmp_first list */
1257 ptb = &tb_next->jmp_first;
1258 for(;;) {
1259 tb1 = *ptb;
1260 n1 = (long)tb1 & 3;
1261 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1262 if (n1 == n && tb1 == tb)
1263 break;
1264 ptb = &tb1->jmp_next[n1];
1265 }
1266 *ptb = tb->jmp_next[n];
1267 tb->jmp_next[n] = NULL;
3b46e624 1268
ea041c0e
FB
1269 /* suppress the jump to next tb in generated code */
1270 tb_reset_jump(tb, n);
1271
0124311e 1272 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1273 tb_reset_jump_recursive(tb_next);
1274 }
1275}
1276
1277static void tb_reset_jump_recursive(TranslationBlock *tb)
1278{
1279 tb_reset_jump_recursive2(tb, 0);
1280 tb_reset_jump_recursive2(tb, 1);
1281}
1282
1fddef4b 1283#if defined(TARGET_HAS_ICE)
d720b93d
FB
1284static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1285{
9b3c35e0
JM
1286 target_phys_addr_t addr;
1287 target_ulong pd;
c2f07f81
PB
1288 ram_addr_t ram_addr;
1289 PhysPageDesc *p;
d720b93d 1290
c2f07f81
PB
1291 addr = cpu_get_phys_page_debug(env, pc);
1292 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1293 if (!p) {
1294 pd = IO_MEM_UNASSIGNED;
1295 } else {
1296 pd = p->phys_offset;
1297 }
1298 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1299 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1300}
c27004ec 1301#endif
d720b93d 1302
6658ffb8 1303/* Add a watchpoint. */
a1d1bb31
AL
1304int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1305 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1306{
b4051334 1307 target_ulong len_mask = ~(len - 1);
c0ce998e 1308 CPUWatchpoint *wp;
6658ffb8 1309
b4051334
AL
1310 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1311 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1312 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1313 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1314 return -EINVAL;
1315 }
a1d1bb31 1316 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1317
1318 wp->vaddr = addr;
b4051334 1319 wp->len_mask = len_mask;
a1d1bb31
AL
1320 wp->flags = flags;
1321
2dc9f411 1322 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1323 if (flags & BP_GDB)
1324 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1325 else
1326 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1327
6658ffb8 1328 tlb_flush_page(env, addr);
a1d1bb31
AL
1329
1330 if (watchpoint)
1331 *watchpoint = wp;
1332 return 0;
6658ffb8
PB
1333}
1334
a1d1bb31
AL
1335/* Remove a specific watchpoint. */
1336int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1337 int flags)
6658ffb8 1338{
b4051334 1339 target_ulong len_mask = ~(len - 1);
a1d1bb31 1340 CPUWatchpoint *wp;
6658ffb8 1341
c0ce998e 1342 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1343 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1344 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1345 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1346 return 0;
1347 }
1348 }
a1d1bb31 1349 return -ENOENT;
6658ffb8
PB
1350}
1351
a1d1bb31
AL
1352/* Remove a specific watchpoint by reference. */
1353void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1354{
c0ce998e 1355 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1356
a1d1bb31
AL
1357 tlb_flush_page(env, watchpoint->vaddr);
1358
1359 qemu_free(watchpoint);
1360}
1361
1362/* Remove all matching watchpoints. */
1363void cpu_watchpoint_remove_all(CPUState *env, int mask)
1364{
c0ce998e 1365 CPUWatchpoint *wp, *next;
a1d1bb31 1366
c0ce998e 1367 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1368 if (wp->flags & mask)
1369 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1370 }
7d03f82f
EI
1371}
1372
a1d1bb31
AL
1373/* Add a breakpoint. */
1374int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1375 CPUBreakpoint **breakpoint)
4c3a88a2 1376{
1fddef4b 1377#if defined(TARGET_HAS_ICE)
c0ce998e 1378 CPUBreakpoint *bp;
3b46e624 1379
a1d1bb31 1380 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1381
a1d1bb31
AL
1382 bp->pc = pc;
1383 bp->flags = flags;
1384
2dc9f411 1385 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1386 if (flags & BP_GDB)
1387 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1388 else
1389 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1390
d720b93d 1391 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1392
1393 if (breakpoint)
1394 *breakpoint = bp;
4c3a88a2
FB
1395 return 0;
1396#else
a1d1bb31 1397 return -ENOSYS;
4c3a88a2
FB
1398#endif
1399}
1400
a1d1bb31
AL
1401/* Remove a specific breakpoint. */
1402int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1403{
7d03f82f 1404#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1405 CPUBreakpoint *bp;
1406
c0ce998e 1407 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1408 if (bp->pc == pc && bp->flags == flags) {
1409 cpu_breakpoint_remove_by_ref(env, bp);
1410 return 0;
1411 }
7d03f82f 1412 }
a1d1bb31
AL
1413 return -ENOENT;
1414#else
1415 return -ENOSYS;
7d03f82f
EI
1416#endif
1417}
1418
a1d1bb31
AL
1419/* Remove a specific breakpoint by reference. */
1420void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1421{
1fddef4b 1422#if defined(TARGET_HAS_ICE)
c0ce998e 1423 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1424
a1d1bb31
AL
1425 breakpoint_invalidate(env, breakpoint->pc);
1426
1427 qemu_free(breakpoint);
1428#endif
1429}
1430
1431/* Remove all matching breakpoints. */
1432void cpu_breakpoint_remove_all(CPUState *env, int mask)
1433{
1434#if defined(TARGET_HAS_ICE)
c0ce998e 1435 CPUBreakpoint *bp, *next;
a1d1bb31 1436
c0ce998e 1437 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1438 if (bp->flags & mask)
1439 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1440 }
4c3a88a2
FB
1441#endif
1442}
1443
c33a346e
FB
1444/* enable or disable single step mode. EXCP_DEBUG is returned by the
1445 CPU loop after each instruction */
1446void cpu_single_step(CPUState *env, int enabled)
1447{
1fddef4b 1448#if defined(TARGET_HAS_ICE)
c33a346e
FB
1449 if (env->singlestep_enabled != enabled) {
1450 env->singlestep_enabled = enabled;
1451 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1452 /* XXX: only flush what is necessary */
0124311e 1453 tb_flush(env);
c33a346e
FB
1454 }
1455#endif
1456}
1457
34865134
FB
1458/* enable or disable low level logging */
1459void cpu_set_log(int log_flags)
1460{
1461 loglevel = log_flags;
1462 if (loglevel && !logfile) {
11fcfab4 1463 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1464 if (!logfile) {
1465 perror(logfilename);
1466 _exit(1);
1467 }
9fa3e853
FB
1468#if !defined(CONFIG_SOFTMMU)
1469 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1470 {
b55266b5 1471 static char logfile_buf[4096];
9fa3e853
FB
1472 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1473 }
1474#else
34865134 1475 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1476#endif
e735b91c
PB
1477 log_append = 1;
1478 }
1479 if (!loglevel && logfile) {
1480 fclose(logfile);
1481 logfile = NULL;
34865134
FB
1482 }
1483}
1484
1485void cpu_set_log_filename(const char *filename)
1486{
1487 logfilename = strdup(filename);
e735b91c
PB
1488 if (logfile) {
1489 fclose(logfile);
1490 logfile = NULL;
1491 }
1492 cpu_set_log(loglevel);
34865134 1493}
c33a346e 1494
0124311e 1495/* mask must never be zero, except for A20 change call */
68a79315 1496void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1497{
d5975363 1498#if !defined(USE_NPTL)
ea041c0e 1499 TranslationBlock *tb;
15a51156 1500 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1501#endif
2e70f6ef 1502 int old_mask;
59817ccb 1503
be214e6c
AJ
1504 if (mask & CPU_INTERRUPT_EXIT) {
1505 env->exit_request = 1;
1506 mask &= ~CPU_INTERRUPT_EXIT;
1507 }
1508
2e70f6ef 1509 old_mask = env->interrupt_request;
68a79315 1510 env->interrupt_request |= mask;
d5975363
PB
1511#if defined(USE_NPTL)
1512 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1513 problem and hope the cpu will stop of its own accord. For userspace
1514 emulation this often isn't actually as bad as it sounds. Often
1515 signals are used primarily to interrupt blocking syscalls. */
1516#else
2e70f6ef 1517 if (use_icount) {
266910c4 1518 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1519#ifndef CONFIG_USER_ONLY
2e70f6ef 1520 if (!can_do_io(env)
be214e6c 1521 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1522 cpu_abort(env, "Raised interrupt while not in I/O function");
1523 }
1524#endif
1525 } else {
1526 tb = env->current_tb;
1527 /* if the cpu is currently executing code, we must unlink it and
1528 all the potentially executing TB */
1529 if (tb && !testandset(&interrupt_lock)) {
1530 env->current_tb = NULL;
1531 tb_reset_jump_recursive(tb);
1532 resetlock(&interrupt_lock);
1533 }
ea041c0e 1534 }
d5975363 1535#endif
ea041c0e
FB
1536}
1537
b54ad049
FB
1538void cpu_reset_interrupt(CPUState *env, int mask)
1539{
1540 env->interrupt_request &= ~mask;
1541}
1542
c7cd6a37 1543const CPULogItem cpu_log_items[] = {
5fafdf24 1544 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1545 "show generated host assembly code for each compiled TB" },
1546 { CPU_LOG_TB_IN_ASM, "in_asm",
1547 "show target assembly code for each compiled TB" },
5fafdf24 1548 { CPU_LOG_TB_OP, "op",
57fec1fe 1549 "show micro ops for each compiled TB" },
f193c797 1550 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1551 "show micro ops "
1552#ifdef TARGET_I386
1553 "before eflags optimization and "
f193c797 1554#endif
e01a1157 1555 "after liveness analysis" },
f193c797
FB
1556 { CPU_LOG_INT, "int",
1557 "show interrupts/exceptions in short format" },
1558 { CPU_LOG_EXEC, "exec",
1559 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1560 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1561 "show CPU state before block translation" },
f193c797
FB
1562#ifdef TARGET_I386
1563 { CPU_LOG_PCALL, "pcall",
1564 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1565 { CPU_LOG_RESET, "cpu_reset",
1566 "show CPU state before CPU resets" },
f193c797 1567#endif
8e3a9fd2 1568#ifdef DEBUG_IOPORT
fd872598
FB
1569 { CPU_LOG_IOPORT, "ioport",
1570 "show all i/o ports accesses" },
8e3a9fd2 1571#endif
f193c797
FB
1572 { 0, NULL, NULL },
1573};
1574
1575static int cmp1(const char *s1, int n, const char *s2)
1576{
1577 if (strlen(s2) != n)
1578 return 0;
1579 return memcmp(s1, s2, n) == 0;
1580}
3b46e624 1581
f193c797
FB
1582/* takes a comma separated list of log masks. Return 0 if error. */
1583int cpu_str_to_log_mask(const char *str)
1584{
c7cd6a37 1585 const CPULogItem *item;
f193c797
FB
1586 int mask;
1587 const char *p, *p1;
1588
1589 p = str;
1590 mask = 0;
1591 for(;;) {
1592 p1 = strchr(p, ',');
1593 if (!p1)
1594 p1 = p + strlen(p);
8e3a9fd2
FB
1595 if(cmp1(p,p1-p,"all")) {
1596 for(item = cpu_log_items; item->mask != 0; item++) {
1597 mask |= item->mask;
1598 }
1599 } else {
f193c797
FB
1600 for(item = cpu_log_items; item->mask != 0; item++) {
1601 if (cmp1(p, p1 - p, item->name))
1602 goto found;
1603 }
1604 return 0;
8e3a9fd2 1605 }
f193c797
FB
1606 found:
1607 mask |= item->mask;
1608 if (*p1 != ',')
1609 break;
1610 p = p1 + 1;
1611 }
1612 return mask;
1613}
ea041c0e 1614
7501267e
FB
1615void cpu_abort(CPUState *env, const char *fmt, ...)
1616{
1617 va_list ap;
493ae1f0 1618 va_list ap2;
7501267e
FB
1619
1620 va_start(ap, fmt);
493ae1f0 1621 va_copy(ap2, ap);
7501267e
FB
1622 fprintf(stderr, "qemu: fatal: ");
1623 vfprintf(stderr, fmt, ap);
1624 fprintf(stderr, "\n");
1625#ifdef TARGET_I386
7fe48483
FB
1626 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1627#else
1628 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1629#endif
93fcfe39
AL
1630 if (qemu_log_enabled()) {
1631 qemu_log("qemu: fatal: ");
1632 qemu_log_vprintf(fmt, ap2);
1633 qemu_log("\n");
f9373291 1634#ifdef TARGET_I386
93fcfe39 1635 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1636#else
93fcfe39 1637 log_cpu_state(env, 0);
f9373291 1638#endif
31b1a7b4 1639 qemu_log_flush();
93fcfe39 1640 qemu_log_close();
924edcae 1641 }
493ae1f0 1642 va_end(ap2);
f9373291 1643 va_end(ap);
7501267e
FB
1644 abort();
1645}
1646
c5be9f08
TS
1647CPUState *cpu_copy(CPUState *env)
1648{
01ba9816 1649 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1650 CPUState *next_cpu = new_env->next_cpu;
1651 int cpu_index = new_env->cpu_index;
5a38f081
AL
1652#if defined(TARGET_HAS_ICE)
1653 CPUBreakpoint *bp;
1654 CPUWatchpoint *wp;
1655#endif
1656
c5be9f08 1657 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1658
1659 /* Preserve chaining and index. */
c5be9f08
TS
1660 new_env->next_cpu = next_cpu;
1661 new_env->cpu_index = cpu_index;
5a38f081
AL
1662
1663 /* Clone all break/watchpoints.
1664 Note: Once we support ptrace with hw-debug register access, make sure
1665 BP_CPU break/watchpoints are handled correctly on clone. */
1666    TAILQ_INIT(&new_env->breakpoints);
1667    TAILQ_INIT(&new_env->watchpoints);
1668#if defined(TARGET_HAS_ICE)
1669 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1670 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1671 }
1672 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1673 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1674 wp->flags, NULL);
1675 }
1676#endif
1677
c5be9f08
TS
1678 return new_env;
1679}
1680
0124311e
FB
1681#if !defined(CONFIG_USER_ONLY)
1682
5c751e99
EI
1683static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1684{
1685 unsigned int i;
1686
1687 /* Discard jump cache entries for any tb which might potentially
1688 overlap the flushed page. */
1689 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1690 memset (&env->tb_jmp_cache[i], 0,
1691 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1692
1693 i = tb_jmp_cache_hash_page(addr);
1694 memset (&env->tb_jmp_cache[i], 0,
1695 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1696}
1697
ee8b7021
FB
1698/* NOTE: if flush_global is true, also flush global entries (not
1699 implemented yet) */
1700void tlb_flush(CPUState *env, int flush_global)
33417e70 1701{
33417e70 1702 int i;
0124311e 1703
9fa3e853
FB
1704#if defined(DEBUG_TLB)
1705 printf("tlb_flush:\n");
1706#endif
0124311e
FB
1707 /* must reset current TB so that interrupts cannot modify the
1708 links while we are modifying them */
1709 env->current_tb = NULL;
1710
33417e70 1711 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1712 env->tlb_table[0][i].addr_read = -1;
1713 env->tlb_table[0][i].addr_write = -1;
1714 env->tlb_table[0][i].addr_code = -1;
1715 env->tlb_table[1][i].addr_read = -1;
1716 env->tlb_table[1][i].addr_write = -1;
1717 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1718#if (NB_MMU_MODES >= 3)
1719 env->tlb_table[2][i].addr_read = -1;
1720 env->tlb_table[2][i].addr_write = -1;
1721 env->tlb_table[2][i].addr_code = -1;
1722#if (NB_MMU_MODES == 4)
1723 env->tlb_table[3][i].addr_read = -1;
1724 env->tlb_table[3][i].addr_write = -1;
1725 env->tlb_table[3][i].addr_code = -1;
1726#endif
1727#endif
33417e70 1728 }
9fa3e853 1729
8a40a180 1730 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1731
0a962c02
FB
1732#ifdef USE_KQEMU
1733 if (env->kqemu_enabled) {
1734 kqemu_flush(env, flush_global);
1735 }
9fa3e853 1736#endif
e3db7226 1737 tlb_flush_count++;
33417e70
FB
1738}
1739
274da6b2 1740static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1741{
5fafdf24 1742 if (addr == (tlb_entry->addr_read &
84b7b8e7 1743 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1744 addr == (tlb_entry->addr_write &
84b7b8e7 1745 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1746 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1747 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1748 tlb_entry->addr_read = -1;
1749 tlb_entry->addr_write = -1;
1750 tlb_entry->addr_code = -1;
1751 }
61382a50
FB
1752}
1753
2e12669a 1754void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1755{
8a40a180 1756 int i;
0124311e 1757
9fa3e853 1758#if defined(DEBUG_TLB)
108c49b8 1759 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1760#endif
0124311e
FB
1761 /* must reset current TB so that interrupts cannot modify the
1762 links while we are modifying them */
1763 env->current_tb = NULL;
61382a50
FB
1764
1765 addr &= TARGET_PAGE_MASK;
1766 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1767 tlb_flush_entry(&env->tlb_table[0][i], addr);
1768 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1769#if (NB_MMU_MODES >= 3)
1770 tlb_flush_entry(&env->tlb_table[2][i], addr);
1771#if (NB_MMU_MODES == 4)
1772 tlb_flush_entry(&env->tlb_table[3][i], addr);
1773#endif
1774#endif
0124311e 1775
5c751e99 1776 tlb_flush_jmp_cache(env, addr);
9fa3e853 1777
0a962c02
FB
1778#ifdef USE_KQEMU
1779 if (env->kqemu_enabled) {
1780 kqemu_flush_page(env, addr);
1781 }
1782#endif
9fa3e853
FB
1783}
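/* Editor's note -- a minimal usage sketch, not part of exec.c.  Target
   MMU emulation usually calls tlb_flush_page() when a single PTE
   changes and tlb_flush() when the whole address space is switched
   (e.g. on a page-table-base register write).  The helper name below
   is hypothetical; it only illustrates the calling convention. */
static void example_invalidate_mapping(CPUState *env, target_ulong va,
                                       int whole_address_space)
{
    if (whole_address_space) {
        tlb_flush(env, 1);                        /* also flush globals */
    } else {
        tlb_flush_page(env, va & TARGET_PAGE_MASK);
    }
}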
1784
9fa3e853
FB
1785/* update the TLBs so that writes to code in the virtual page 'addr'
1786 can be detected */
6a00d601 1787static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1788{
5fafdf24 1789 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1790 ram_addr + TARGET_PAGE_SIZE,
1791 CODE_DIRTY_FLAG);
9fa3e853
FB
1792}
1793
9fa3e853 1794/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1795 tested for self modifying code */
5fafdf24 1796static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1797 target_ulong vaddr)
9fa3e853 1798{
3a7d929e 1799 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1800}
1801
5fafdf24 1802static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1803 unsigned long start, unsigned long length)
1804{
1805 unsigned long addr;
84b7b8e7
FB
1806 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1807 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1808 if ((addr - start) < length) {
0f459d16 1809 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1810 }
1811 }
1812}
1813
3a7d929e 1814void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1815 int dirty_flags)
1ccde1cb
FB
1816{
1817 CPUState *env;
4f2ac237 1818 unsigned long length, start1;
0a962c02
FB
1819 int i, mask, len;
1820 uint8_t *p;
1ccde1cb
FB
1821
1822 start &= TARGET_PAGE_MASK;
1823 end = TARGET_PAGE_ALIGN(end);
1824
1825 length = end - start;
1826 if (length == 0)
1827 return;
0a962c02 1828 len = length >> TARGET_PAGE_BITS;
3a7d929e 1829#ifdef USE_KQEMU
6a00d601
FB
1830 /* XXX: should not depend on cpu context */
1831 env = first_cpu;
3a7d929e 1832 if (env->kqemu_enabled) {
f23db169
FB
1833 ram_addr_t addr;
1834 addr = start;
1835 for(i = 0; i < len; i++) {
1836 kqemu_set_notdirty(env, addr);
1837 addr += TARGET_PAGE_SIZE;
1838 }
3a7d929e
FB
1839 }
1840#endif
f23db169
FB
1841 mask = ~dirty_flags;
1842 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1843 for(i = 0; i < len; i++)
1844 p[i] &= mask;
1845
1ccde1cb
FB
1846 /* we modify the TLB cache so that the dirty bit will be set again
1847 when accessing the range */
59817ccb 1848 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1849 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1850 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1851 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1852 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1853 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1854#if (NB_MMU_MODES >= 3)
1855 for(i = 0; i < CPU_TLB_SIZE; i++)
1856 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1857#if (NB_MMU_MODES == 4)
1858 for(i = 0; i < CPU_TLB_SIZE; i++)
1859 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1860#endif
1861#endif
6a00d601 1862 }
1ccde1cb
FB
1863}
1864
74576198
AL
1865int cpu_physical_memory_set_dirty_tracking(int enable)
1866{
1867 in_migration = enable;
1868 return 0;
1869}
1870
1871int cpu_physical_memory_get_dirty_tracking(void)
1872{
1873 return in_migration;
1874}
1875
2bec46dc
AL
1876void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1877{
1878 if (kvm_enabled())
1879 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1880}
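/* Editor's sketch, not part of exec.c: how a migration-style caller is
   expected to drive the dirty-tracking hooks above.  The loop and the
   send_page() step are hypothetical; MIGRATION_DIRTY_FLAG and
   cpu_physical_memory_get_dirty() are assumed from cpu-all.h in trees
   of this vintage. */
static void example_sync_and_scan(ram_addr_t start, ram_addr_t end)
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);
    cpu_physical_sync_dirty_bitmap(start, end);   /* pull KVM's dirty log */
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            /* send_page(addr);  -- hypothetical transmit step */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}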
1881
3a7d929e
FB
1882static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1883{
1884 ram_addr_t ram_addr;
1885
84b7b8e7 1886 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1887 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1888 tlb_entry->addend - (unsigned long)phys_ram_base;
1889 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1890 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1891 }
1892 }
1893}
1894
1895/* update the TLB according to the current state of the dirty bits */
1896void cpu_tlb_update_dirty(CPUState *env)
1897{
1898 int i;
1899 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1900 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1901 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1902 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1903#if (NB_MMU_MODES >= 3)
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_update_dirty(&env->tlb_table[2][i]);
1906#if (NB_MMU_MODES == 4)
1907 for(i = 0; i < CPU_TLB_SIZE; i++)
1908 tlb_update_dirty(&env->tlb_table[3][i]);
1909#endif
1910#endif
3a7d929e
FB
1911}
1912
0f459d16 1913static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1914{
0f459d16
PB
1915 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1916 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1917}
1918
0f459d16
PB
1919/* update the TLB corresponding to virtual page vaddr
1920 so that it is no longer dirty */
1921static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1922{
1ccde1cb
FB
1923 int i;
1924
0f459d16 1925 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1926 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1927 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1928 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1929#if (NB_MMU_MODES >= 3)
0f459d16 1930 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1931#if (NB_MMU_MODES == 4)
0f459d16 1932 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1933#endif
1934#endif
9fa3e853
FB
1935}
1936
59817ccb
FB
1937/* add a new TLB entry. At most one entry for a given virtual address
1938 is permitted. Return 0 if OK or 2 if the page could not be mapped
1939 (can only happen in non SOFTMMU mode for I/O pages or pages
1940 conflicting with the host address space). */
5fafdf24
TS
1941int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1942 target_phys_addr_t paddr, int prot,
6ebbf390 1943 int mmu_idx, int is_softmmu)
9fa3e853 1944{
92e873b9 1945 PhysPageDesc *p;
4f2ac237 1946 unsigned long pd;
9fa3e853 1947 unsigned int index;
4f2ac237 1948 target_ulong address;
0f459d16 1949 target_ulong code_address;
108c49b8 1950 target_phys_addr_t addend;
9fa3e853 1951 int ret;
84b7b8e7 1952 CPUTLBEntry *te;
a1d1bb31 1953 CPUWatchpoint *wp;
0f459d16 1954 target_phys_addr_t iotlb;
9fa3e853 1955
92e873b9 1956 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1957 if (!p) {
1958 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1959 } else {
1960 pd = p->phys_offset;
9fa3e853
FB
1961 }
1962#if defined(DEBUG_TLB)
6ebbf390
JM
1963 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1964 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1965#endif
1966
1967 ret = 0;
0f459d16
PB
1968 address = vaddr;
1969 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1970 /* IO memory case (romd handled later) */
1971 address |= TLB_MMIO;
1972 }
1973 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1974 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1975 /* Normal RAM. */
1976 iotlb = pd & TARGET_PAGE_MASK;
1977 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1978 iotlb |= IO_MEM_NOTDIRTY;
1979 else
1980 iotlb |= IO_MEM_ROM;
1981 } else {
1982 /* IO handlers are currently passed a physical address.
1983 It would be nice to pass an offset from the base address
1984 of that region. This would avoid having to special case RAM,
1985 and avoid full address decoding in every device.
1986 We can't use the high bits of pd for this because
1987 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1988 iotlb = (pd & ~TARGET_PAGE_MASK);
1989 if (p) {
8da3ff18
PB
1990 iotlb += p->region_offset;
1991 } else {
1992 iotlb += paddr;
1993 }
0f459d16
PB
1994 }
1995
1996 code_address = address;
1997 /* Make accesses to pages with watchpoints go via the
1998 watchpoint trap routines. */
c0ce998e 1999 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2000 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2001 iotlb = io_mem_watch + paddr;
2002 /* TODO: The memory case can be optimized by not trapping
2003 reads of pages with a write breakpoint. */
2004 address |= TLB_MMIO;
6658ffb8 2005 }
0f459d16 2006 }
d79acba4 2007
0f459d16
PB
2008 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2009 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2010 te = &env->tlb_table[mmu_idx][index];
2011 te->addend = addend - vaddr;
2012 if (prot & PAGE_READ) {
2013 te->addr_read = address;
2014 } else {
2015 te->addr_read = -1;
2016 }
5c751e99 2017
0f459d16
PB
2018 if (prot & PAGE_EXEC) {
2019 te->addr_code = code_address;
2020 } else {
2021 te->addr_code = -1;
2022 }
2023 if (prot & PAGE_WRITE) {
2024 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2025 (pd & IO_MEM_ROMD)) {
2026 /* Write access calls the I/O callback. */
2027 te->addr_write = address | TLB_MMIO;
2028 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2029 !cpu_physical_memory_is_dirty(pd)) {
2030 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2031 } else {
0f459d16 2032 te->addr_write = address;
9fa3e853 2033 }
0f459d16
PB
2034 } else {
2035 te->addr_write = -1;
9fa3e853 2036 }
9fa3e853
FB
2037 return ret;
2038}
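/* Editor's sketch, not part of exec.c: the shape of a caller.  A
   target's page-table walker, having translated vaddr -> paddr with
   permissions 'prot', installs the mapping like this; everything
   except tlb_set_page_exec() itself is hypothetical. */
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   target_phys_addr_t paddr, int prot,
                                   int mmu_idx)
{
    /* at most one TLB entry per virtual page; both sides page-aligned */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, 1 /* is_softmmu */);
}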
2039
0124311e
FB
2040#else
2041
ee8b7021 2042void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2043{
2044}
2045
2e12669a 2046void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2047{
2048}
2049
5fafdf24
TS
2050int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2051 target_phys_addr_t paddr, int prot,
6ebbf390 2052 int mmu_idx, int is_softmmu)
9fa3e853
FB
2053{
2054 return 0;
2055}
0124311e 2056
9fa3e853
FB
2057/* dump memory mappings */
2058void page_dump(FILE *f)
33417e70 2059{
9fa3e853
FB
2060 unsigned long start, end;
2061 int i, j, prot, prot1;
2062 PageDesc *p;
33417e70 2063
9fa3e853
FB
2064 fprintf(f, "%-8s %-8s %-8s %s\n",
2065 "start", "end", "size", "prot");
2066 start = -1;
2067 end = -1;
2068 prot = 0;
2069 for(i = 0; i <= L1_SIZE; i++) {
2070 if (i < L1_SIZE)
2071 p = l1_map[i];
2072 else
2073 p = NULL;
2074 for(j = 0;j < L2_SIZE; j++) {
2075 if (!p)
2076 prot1 = 0;
2077 else
2078 prot1 = p[j].flags;
2079 if (prot1 != prot) {
2080 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2081 if (start != -1) {
2082 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2083 start, end, end - start,
9fa3e853
FB
2084 prot & PAGE_READ ? 'r' : '-',
2085 prot & PAGE_WRITE ? 'w' : '-',
2086 prot & PAGE_EXEC ? 'x' : '-');
2087 }
2088 if (prot1 != 0)
2089 start = end;
2090 else
2091 start = -1;
2092 prot = prot1;
2093 }
2094 if (!p)
2095 break;
2096 }
33417e70 2097 }
33417e70
FB
2098}
2099
53a5960a 2100int page_get_flags(target_ulong address)
33417e70 2101{
9fa3e853
FB
2102 PageDesc *p;
2103
2104 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2105 if (!p)
9fa3e853
FB
2106 return 0;
2107 return p->flags;
2108}
2109
2110/* modify the flags of a page and invalidate the code if
2111 necessary. The flag PAGE_WRITE_ORG is set automatically
2112 depending on PAGE_WRITE */
53a5960a 2113void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2114{
2115 PageDesc *p;
53a5960a 2116 target_ulong addr;
9fa3e853 2117
c8a706fe 2118 /* mmap_lock should already be held. */
9fa3e853
FB
2119 start = start & TARGET_PAGE_MASK;
2120 end = TARGET_PAGE_ALIGN(end);
2121 if (flags & PAGE_WRITE)
2122 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2123 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2124 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2125 /* We may be called for host regions that are outside guest
2126 address space. */
2127 if (!p)
2128 return;
9fa3e853
FB
2129 /* if the write protection is set, then we invalidate the code
2130 inside */
5fafdf24 2131 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2132 (flags & PAGE_WRITE) &&
2133 p->first_tb) {
d720b93d 2134 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2135 }
2136 p->flags = flags;
2137 }
33417e70
FB
2138}
2139
3d97b40b
TS
2140int page_check_range(target_ulong start, target_ulong len, int flags)
2141{
2142 PageDesc *p;
2143 target_ulong end;
2144 target_ulong addr;
2145
55f280c9
AZ
2146 if (start + len < start)
2147 /* we've wrapped around */
2148 return -1;
2149
3d97b40b
TS
2150 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2151 start = start & TARGET_PAGE_MASK;
2152
3d97b40b
TS
2153 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2154 p = page_find(addr >> TARGET_PAGE_BITS);
2155 if( !p )
2156 return -1;
2157 if( !(p->flags & PAGE_VALID) )
2158 return -1;
2159
dae3270c 2160 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2161 return -1;
dae3270c
FB
2162 if (flags & PAGE_WRITE) {
2163 if (!(p->flags & PAGE_WRITE_ORG))
2164 return -1;
2165 /* unprotect the page if it was put read-only because it
2166 contains translated code */
2167 if (!(p->flags & PAGE_WRITE)) {
2168 if (!page_unprotect(addr, 0, NULL))
2169 return -1;
2170 }
2171 return 0;
2172 }
3d97b40b
TS
2173 }
2174 return 0;
2175}
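/* Editor's sketch, not part of exec.c: a typical user-mode caller.
   Syscall emulation validates a guest buffer with page_check_range()
   before touching it; the wrapper name below is hypothetical. */
static int example_guest_buffer_ok(target_ulong guest_addr,
                                   target_ulong size, int writable)
{
    int flags = PAGE_READ | (writable ? PAGE_WRITE : 0);
    /* 0 means every page in [guest_addr, guest_addr + size) is mapped
       with the requested protection; -1 means it is not */
    return page_check_range(guest_addr, size, flags) == 0;
}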
2176
9fa3e853
FB
2177/* called from signal handler: invalidate the code and unprotect the
2178 page. Return TRUE if the fault was successfully handled. */
53a5960a 2179int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2180{
2181 unsigned int page_index, prot, pindex;
2182 PageDesc *p, *p1;
53a5960a 2183 target_ulong host_start, host_end, addr;
9fa3e853 2184
c8a706fe
PB
2185 /* Technically this isn't safe inside a signal handler. However we
2186 know this only ever happens in a synchronous SEGV handler, so in
2187 practice it seems to be ok. */
2188 mmap_lock();
2189
83fb7adf 2190 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2191 page_index = host_start >> TARGET_PAGE_BITS;
2192 p1 = page_find(page_index);
c8a706fe
PB
2193 if (!p1) {
2194 mmap_unlock();
9fa3e853 2195 return 0;
c8a706fe 2196 }
83fb7adf 2197 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2198 p = p1;
2199 prot = 0;
2200 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2201 prot |= p->flags;
2202 p++;
2203 }
2204 /* if the page was really writable, then we change its
2205 protection back to writable */
2206 if (prot & PAGE_WRITE_ORG) {
2207 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2208 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2209 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2210 (prot & PAGE_BITS) | PAGE_WRITE);
2211 p1[pindex].flags |= PAGE_WRITE;
2212 /* and since the content will be modified, we must invalidate
2213 the corresponding translated code. */
d720b93d 2214 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2215#ifdef DEBUG_TB_CHECK
2216 tb_invalidate_check(address);
2217#endif
c8a706fe 2218 mmap_unlock();
9fa3e853
FB
2219 return 1;
2220 }
2221 }
c8a706fe 2222 mmap_unlock();
9fa3e853
FB
2223 return 0;
2224}
2225
6a00d601
FB
2226static inline void tlb_set_dirty(CPUState *env,
2227 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2228{
2229}
9fa3e853
FB
2230#endif /* defined(CONFIG_USER_ONLY) */
2231
e2eef170 2232#if !defined(CONFIG_USER_ONLY)
8da3ff18 2233
db7b5426 2234static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2235 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2236static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2237 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2238#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2239 need_subpage) \
2240 do { \
2241 if (addr > start_addr) \
2242 start_addr2 = 0; \
2243 else { \
2244 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2245 if (start_addr2 > 0) \
2246 need_subpage = 1; \
2247 } \
2248 \
49e9fba2 2249 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2250 end_addr2 = TARGET_PAGE_SIZE - 1; \
2251 else { \
2252 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2253 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2254 need_subpage = 1; \
2255 } \
2256 } while (0)
2257
33417e70
FB
2258/* register physical memory. 'size' must be a multiple of the target
2259 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2260 io memory page. The address used when calling the IO function is
2261 the offset from the start of the region, plus region_offset. Both
2262 start_addr and region_offset are rounded down to a page boundary
2263 before calculating this offset. This should not be a problem unless
2264 the low bits of start_addr and region_offset differ. */
2265void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2266 ram_addr_t size,
2267 ram_addr_t phys_offset,
2268 ram_addr_t region_offset)
33417e70 2269{
108c49b8 2270 target_phys_addr_t addr, end_addr;
92e873b9 2271 PhysPageDesc *p;
9d42037b 2272 CPUState *env;
00f82b8a 2273 ram_addr_t orig_size = size;
db7b5426 2274 void *subpage;
33417e70 2275
da260249
FB
2276#ifdef USE_KQEMU
2277 /* XXX: should not depend on cpu context */
2278 env = first_cpu;
2279 if (env->kqemu_enabled) {
2280 kqemu_set_phys_mem(start_addr, size, phys_offset);
2281 }
2282#endif
7ba1e619
AL
2283 if (kvm_enabled())
2284 kvm_set_phys_mem(start_addr, size, phys_offset);
2285
67c4d23c
PB
2286 if (phys_offset == IO_MEM_UNASSIGNED) {
2287 region_offset = start_addr;
2288 }
8da3ff18 2289 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2290 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2291 end_addr = start_addr + (target_phys_addr_t)size;
2292 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2294 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2295 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2296 target_phys_addr_t start_addr2, end_addr2;
2297 int need_subpage = 0;
2298
2299 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2300 need_subpage);
4254fab8 2301 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2302 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2303 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2304 &p->phys_offset, orig_memory,
2305 p->region_offset);
db7b5426
BS
2306 } else {
2307 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2308 >> IO_MEM_SHIFT];
2309 }
8da3ff18
PB
2310 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2311 region_offset);
2312 p->region_offset = 0;
db7b5426
BS
2313 } else {
2314 p->phys_offset = phys_offset;
2315 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2316 (phys_offset & IO_MEM_ROMD))
2317 phys_offset += TARGET_PAGE_SIZE;
2318 }
2319 } else {
2320 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2321 p->phys_offset = phys_offset;
8da3ff18 2322 p->region_offset = region_offset;
db7b5426 2323 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2324 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2325 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2326 } else {
db7b5426
BS
2327 target_phys_addr_t start_addr2, end_addr2;
2328 int need_subpage = 0;
2329
2330 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2331 end_addr2, need_subpage);
2332
4254fab8 2333 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2334 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2335 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2336 addr & TARGET_PAGE_MASK);
db7b5426 2337 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2338 phys_offset, region_offset);
2339 p->region_offset = 0;
db7b5426
BS
2340 }
2341 }
2342 }
8da3ff18 2343 region_offset += TARGET_PAGE_SIZE;
33417e70 2344 }
3b46e624 2345
9d42037b
FB
2346 /* since each CPU stores ram addresses in its TLB cache, we must
2347 reset the modified entries */
2348 /* XXX: slow ! */
2349 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2350 tlb_flush(env, 1);
2351 }
33417e70
FB
2352}
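/* Editor's sketch, not part of exec.c: how board code typically wires
   up guest-physical ranges with the function above.  RAM backing comes
   from qemu_ram_alloc() (defined a little further down); the addresses
   and sizes here are made up for illustration. */
static void example_map_board_memory(void)
{
    ram_addr_t ram_offset;

    /* 16 MB of RAM at guest-physical address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory_offset(0x00000000, 16 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);

    /* mark one page as unassigned again: accesses will reach the
       unassigned_mem_* handlers defined later in this file */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        IO_MEM_UNASSIGNED, 0);
}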
2353
ba863458 2354/* XXX: temporary until new memory mapping API */
00f82b8a 2355ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2356{
2357 PhysPageDesc *p;
2358
2359 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2360 if (!p)
2361 return IO_MEM_UNASSIGNED;
2362 return p->phys_offset;
2363}
2364
f65ed4c1
AL
2365void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2366{
2367 if (kvm_enabled())
2368 kvm_coalesce_mmio_region(addr, size);
2369}
2370
2371void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2372{
2373 if (kvm_enabled())
2374 kvm_uncoalesce_mmio_region(addr, size);
2375}
2376
e9a1ab19 2377/* XXX: better than nothing */
00f82b8a 2378ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2379{
2380 ram_addr_t addr;
7fb4fdcf 2381 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2382 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2383 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2384 abort();
2385 }
2386 addr = phys_ram_alloc_offset;
2387 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2388 return addr;
2389}
2390
2391void qemu_ram_free(ram_addr_t addr)
2392{
2393}
2394
a4193c8a 2395static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2396{
67d3b957 2397#ifdef DEBUG_UNASSIGNED
ab3d1727 2398 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2399#endif
0a6f8a6d 2400#if defined(TARGET_SPARC)
e18231a3
BS
2401 do_unassigned_access(addr, 0, 0, 0, 1);
2402#endif
2403 return 0;
2404}
2405
2406static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2407{
2408#ifdef DEBUG_UNASSIGNED
2409 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2410#endif
0a6f8a6d 2411#if defined(TARGET_SPARC)
e18231a3
BS
2412 do_unassigned_access(addr, 0, 0, 0, 2);
2413#endif
2414 return 0;
2415}
2416
2417static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2418{
2419#ifdef DEBUG_UNASSIGNED
2420 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2421#endif
0a6f8a6d 2422#if defined(TARGET_SPARC)
e18231a3 2423 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2424#endif
33417e70
FB
2425 return 0;
2426}
2427
a4193c8a 2428static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2429{
67d3b957 2430#ifdef DEBUG_UNASSIGNED
ab3d1727 2431 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2432#endif
0a6f8a6d 2433#if defined(TARGET_SPARC)
e18231a3
BS
2434 do_unassigned_access(addr, 1, 0, 0, 1);
2435#endif
2436}
2437
2438static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2439{
2440#ifdef DEBUG_UNASSIGNED
2441 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2442#endif
0a6f8a6d 2443#if defined(TARGET_SPARC)
e18231a3
BS
2444 do_unassigned_access(addr, 1, 0, 0, 2);
2445#endif
2446}
2447
2448static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2449{
2450#ifdef DEBUG_UNASSIGNED
2451 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2452#endif
0a6f8a6d 2453#if defined(TARGET_SPARC)
e18231a3 2454 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2455#endif
33417e70
FB
2456}
2457
2458static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2459 unassigned_mem_readb,
e18231a3
BS
2460 unassigned_mem_readw,
2461 unassigned_mem_readl,
33417e70
FB
2462};
2463
2464static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2465 unassigned_mem_writeb,
e18231a3
BS
2466 unassigned_mem_writew,
2467 unassigned_mem_writel,
33417e70
FB
2468};
2469
0f459d16
PB
2470static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2471 uint32_t val)
9fa3e853 2472{
3a7d929e 2473 int dirty_flags;
3a7d929e
FB
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2476#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2477 tb_invalidate_phys_page_fast(ram_addr, 1);
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2479#endif
3a7d929e 2480 }
0f459d16 2481 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2482#ifdef USE_KQEMU
2483 if (cpu_single_env->kqemu_enabled &&
2484 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2485 kqemu_modify_page(cpu_single_env, ram_addr);
2486#endif
f23db169
FB
2487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2488 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2489 /* we remove the notdirty callback only if the code has been
2490 flushed */
2491 if (dirty_flags == 0xff)
2e70f6ef 2492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2493}
2494
0f459d16
PB
2495static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2496 uint32_t val)
9fa3e853 2497{
3a7d929e 2498 int dirty_flags;
3a7d929e
FB
2499 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2500 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2501#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2502 tb_invalidate_phys_page_fast(ram_addr, 2);
2503 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2504#endif
3a7d929e 2505 }
0f459d16 2506 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2507#ifdef USE_KQEMU
2508 if (cpu_single_env->kqemu_enabled &&
2509 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2510 kqemu_modify_page(cpu_single_env, ram_addr);
2511#endif
f23db169
FB
2512 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2513 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2514 /* we remove the notdirty callback only if the code has been
2515 flushed */
2516 if (dirty_flags == 0xff)
2e70f6ef 2517 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2518}
2519
0f459d16
PB
2520static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2521 uint32_t val)
9fa3e853 2522{
3a7d929e 2523 int dirty_flags;
3a7d929e
FB
2524 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2525 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2526#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2527 tb_invalidate_phys_page_fast(ram_addr, 4);
2528 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2529#endif
3a7d929e 2530 }
0f459d16 2531 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2532#ifdef USE_KQEMU
2533 if (cpu_single_env->kqemu_enabled &&
2534 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2535 kqemu_modify_page(cpu_single_env, ram_addr);
2536#endif
f23db169
FB
2537 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2538 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2539 /* we remove the notdirty callback only if the code has been
2540 flushed */
2541 if (dirty_flags == 0xff)
2e70f6ef 2542 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2543}
2544
3a7d929e 2545static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2546 NULL, /* never used */
2547 NULL, /* never used */
2548 NULL, /* never used */
2549};
2550
1ccde1cb
FB
2551static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2552 notdirty_mem_writeb,
2553 notdirty_mem_writew,
2554 notdirty_mem_writel,
2555};
2556
0f459d16 2557/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2558static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2559{
2560 CPUState *env = cpu_single_env;
06d55cc1
AL
2561 target_ulong pc, cs_base;
2562 TranslationBlock *tb;
0f459d16 2563 target_ulong vaddr;
a1d1bb31 2564 CPUWatchpoint *wp;
06d55cc1 2565 int cpu_flags;
0f459d16 2566
06d55cc1
AL
2567 if (env->watchpoint_hit) {
2568 /* We re-entered the check after replacing the TB. Now raise
2569 * the debug interrupt so that it will trigger after the
2570 * current instruction. */
2571 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2572 return;
2573 }
2e70f6ef 2574 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2575 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2576 if ((vaddr == (wp->vaddr & len_mask) ||
2577 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2578 wp->flags |= BP_WATCHPOINT_HIT;
2579 if (!env->watchpoint_hit) {
2580 env->watchpoint_hit = wp;
2581 tb = tb_find_pc(env->mem_io_pc);
2582 if (!tb) {
2583 cpu_abort(env, "check_watchpoint: could not find TB for "
2584 "pc=%p", (void *)env->mem_io_pc);
2585 }
2586 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2587 tb_phys_invalidate(tb, -1);
2588 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2589 env->exception_index = EXCP_DEBUG;
2590 } else {
2591 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2592 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2593 }
2594 cpu_resume_from_signal(env, NULL);
06d55cc1 2595 }
6e140f28
AL
2596 } else {
2597 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2598 }
2599 }
2600}
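/* Editor's sketch, not part of exec.c: watchpoints are installed with
   cpu_watchpoint_insert() (same call as in cpu_copy() above); the TLB
   trick in tlb_set_page_exec() then routes accesses to the watch_mem_*
   handlers below, which end up in check_watchpoint().  Only the
   wrapper name is hypothetical. */
static int example_set_write_watch(CPUState *env, target_ulong vaddr,
                                   target_ulong len)
{
    /* stop before the access so guest state is still unmodified */
    return cpu_watchpoint_insert(env, vaddr, len,
                                 BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS,
                                 NULL);
}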
2601
6658ffb8
PB
2602/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2603 so these check for a hit then pass through to the normal out-of-line
2604 phys routines. */
2605static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2606{
b4051334 2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2608 return ldub_phys(addr);
2609}
2610
2611static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2612{
b4051334 2613 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2614 return lduw_phys(addr);
2615}
2616
2617static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2618{
b4051334 2619 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2620 return ldl_phys(addr);
2621}
2622
6658ffb8
PB
2623static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2624 uint32_t val)
2625{
b4051334 2626 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2627 stb_phys(addr, val);
2628}
2629
2630static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2631 uint32_t val)
2632{
b4051334 2633 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2634 stw_phys(addr, val);
2635}
2636
2637static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2638 uint32_t val)
2639{
b4051334 2640 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2641 stl_phys(addr, val);
2642}
2643
2644static CPUReadMemoryFunc *watch_mem_read[3] = {
2645 watch_mem_readb,
2646 watch_mem_readw,
2647 watch_mem_readl,
2648};
2649
2650static CPUWriteMemoryFunc *watch_mem_write[3] = {
2651 watch_mem_writeb,
2652 watch_mem_writew,
2653 watch_mem_writel,
2654};
6658ffb8 2655
db7b5426
BS
2656static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2657 unsigned int len)
2658{
db7b5426
BS
2659 uint32_t ret;
2660 unsigned int idx;
2661
8da3ff18 2662 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2663#if defined(DEBUG_SUBPAGE)
2664 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2665 mmio, len, addr, idx);
2666#endif
8da3ff18
PB
2667 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2668 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2669
2670 return ret;
2671}
2672
2673static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2674 uint32_t value, unsigned int len)
2675{
db7b5426
BS
2676 unsigned int idx;
2677
8da3ff18 2678 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2679#if defined(DEBUG_SUBPAGE)
2680 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2681 mmio, len, addr, idx, value);
2682#endif
8da3ff18
PB
2683 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2684 addr + mmio->region_offset[idx][1][len],
2685 value);
db7b5426
BS
2686}
2687
2688static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2689{
2690#if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2692#endif
2693
2694 return subpage_readlen(opaque, addr, 0);
2695}
2696
2697static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2698 uint32_t value)
2699{
2700#if defined(DEBUG_SUBPAGE)
2701 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2702#endif
2703 subpage_writelen(opaque, addr, value, 0);
2704}
2705
2706static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2707{
2708#if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2710#endif
2711
2712 return subpage_readlen(opaque, addr, 1);
2713}
2714
2715static void subpage_writew (void *opaque, target_phys_addr_t addr,
2716 uint32_t value)
2717{
2718#if defined(DEBUG_SUBPAGE)
2719 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2720#endif
2721 subpage_writelen(opaque, addr, value, 1);
2722}
2723
2724static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2725{
2726#if defined(DEBUG_SUBPAGE)
2727 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2728#endif
2729
2730 return subpage_readlen(opaque, addr, 2);
2731}
2732
2733static void subpage_writel (void *opaque,
2734 target_phys_addr_t addr, uint32_t value)
2735{
2736#if defined(DEBUG_SUBPAGE)
2737 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2738#endif
2739 subpage_writelen(opaque, addr, value, 2);
2740}
2741
2742static CPUReadMemoryFunc *subpage_read[] = {
2743 &subpage_readb,
2744 &subpage_readw,
2745 &subpage_readl,
2746};
2747
2748static CPUWriteMemoryFunc *subpage_write[] = {
2749 &subpage_writeb,
2750 &subpage_writew,
2751 &subpage_writel,
2752};
2753
2754static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2755 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2756{
2757 int idx, eidx;
4254fab8 2758 unsigned int i;
db7b5426
BS
2759
2760 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2761 return -1;
2762 idx = SUBPAGE_IDX(start);
2763 eidx = SUBPAGE_IDX(end);
2764#if defined(DEBUG_SUBPAGE)
2765 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2766 mmio, start, end, idx, eidx, memory);
2767#endif
2768 memory >>= IO_MEM_SHIFT;
2769 for (; idx <= eidx; idx++) {
4254fab8 2770 for (i = 0; i < 4; i++) {
3ee89922
BS
2771 if (io_mem_read[memory][i]) {
2772 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2773 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2774 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2775 }
2776 if (io_mem_write[memory][i]) {
2777 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2778 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2779 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2780 }
4254fab8 2781 }
db7b5426
BS
2782 }
2783
2784 return 0;
2785}
2786
00f82b8a 2787static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2788 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2789{
2790 subpage_t *mmio;
2791 int subpage_memory;
2792
2793 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2794
2795 mmio->base = base;
2796 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2797#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2798 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2799 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2800#endif
1eec614b
AL
2801 *phys = subpage_memory | IO_MEM_SUBPAGE;
2802 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2803 region_offset);
db7b5426
BS
2804
2805 return mmio;
2806}
2807
88715657
AL
2808static int get_free_io_mem_idx(void)
2809{
2810 int i;
2811
2812 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2813 if (!io_mem_used[i]) {
2814 io_mem_used[i] = 1;
2815 return i;
2816 }
2817
2818 return -1;
2819}
2820
33417e70
FB
2821static void io_mem_init(void)
2822{
88715657
AL
2823 int i;
2824
3a7d929e 2825 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2826 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2827 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2828 for (i=0; i<5; i++)
2829 io_mem_used[i] = 1;
1ccde1cb 2830
0f459d16 2831 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2832 watch_mem_write, NULL);
1ccde1cb 2833 /* alloc dirty bits array */
0a962c02 2834 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2835 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2836}
2837
2838/* mem_read and mem_write are arrays of functions containing the
2839 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2840 2). Functions can be omitted with a NULL function pointer. The
2841 registered functions may be modified dynamically later.
2842 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2843 modified. If it is zero, a new io zone is allocated. The return
2844 value can be used with cpu_register_physical_memory(). (-1) is
2845 returned on error. */
33417e70
FB
2846int cpu_register_io_memory(int io_index,
2847 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2848 CPUWriteMemoryFunc **mem_write,
2849 void *opaque)
33417e70 2850{
4254fab8 2851 int i, subwidth = 0;
33417e70
FB
2852
2853 if (io_index <= 0) {
88715657
AL
2854 io_index = get_free_io_mem_idx();
2855 if (io_index == -1)
2856 return io_index;
33417e70
FB
2857 } else {
2858 if (io_index >= IO_MEM_NB_ENTRIES)
2859 return -1;
2860 }
b5ff1b31 2861
33417e70 2862 for(i = 0;i < 3; i++) {
4254fab8
BS
2863 if (!mem_read[i] || !mem_write[i])
2864 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2865 io_mem_read[io_index][i] = mem_read[i];
2866 io_mem_write[io_index][i] = mem_write[i];
2867 }
a4193c8a 2868 io_mem_opaque[io_index] = opaque;
4254fab8 2869 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2870}
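/* Editor's sketch, not part of exec.c: using the registration API
   documented above.  A device model provides per-width handlers,
   registers them, and maps the returned token at a guest-physical
   address.  The device struct, handler bodies and the 0x90000000 base
   are hypothetical; NULL entries mark widths the device does not
   implement (cpu_register_io_memory() flags those via IO_MEM_SUBWIDTH). */
typedef struct {
    uint32_t reg;
} ExampleDev;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    /* with region_offset == 0, 'addr' is the offset into the region */
    return d->reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_map(ExampleDev *d)
{
    int iomem = cpu_register_io_memory(0, example_dev_read,
                                       example_dev_write, d);
    cpu_register_physical_memory_offset(0x90000000, TARGET_PAGE_SIZE,
                                        iomem, 0);
}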
61382a50 2871
88715657
AL
2872void cpu_unregister_io_memory(int io_table_address)
2873{
2874 int i;
2875 int io_index = io_table_address >> IO_MEM_SHIFT;
2876
2877 for (i=0;i < 3; i++) {
2878 io_mem_read[io_index][i] = unassigned_mem_read[i];
2879 io_mem_write[io_index][i] = unassigned_mem_write[i];
2880 }
2881 io_mem_opaque[io_index] = NULL;
2882 io_mem_used[io_index] = 0;
2883}
2884
8926b517
FB
2885CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2886{
2887 return io_mem_write[io_index >> IO_MEM_SHIFT];
2888}
2889
2890CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2891{
2892 return io_mem_read[io_index >> IO_MEM_SHIFT];
2893}
2894
e2eef170
PB
2895#endif /* !defined(CONFIG_USER_ONLY) */
2896
13eb76e0
FB
2897/* physical memory access (slow version, mainly for debug) */
2898#if defined(CONFIG_USER_ONLY)
5fafdf24 2899void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2900 int len, int is_write)
2901{
2902 int l, flags;
2903 target_ulong page;
53a5960a 2904 void * p;
13eb76e0
FB
2905
2906 while (len > 0) {
2907 page = addr & TARGET_PAGE_MASK;
2908 l = (page + TARGET_PAGE_SIZE) - addr;
2909 if (l > len)
2910 l = len;
2911 flags = page_get_flags(page);
2912 if (!(flags & PAGE_VALID))
2913 return;
2914 if (is_write) {
2915 if (!(flags & PAGE_WRITE))
2916 return;
579a97f7 2917 /* XXX: this code should not depend on lock_user */
72fb7daa 2918 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2919 /* FIXME - should this return an error rather than just fail? */
2920 return;
72fb7daa
AJ
2921 memcpy(p, buf, l);
2922 unlock_user(p, addr, l);
13eb76e0
FB
2923 } else {
2924 if (!(flags & PAGE_READ))
2925 return;
579a97f7 2926 /* XXX: this code should not depend on lock_user */
72fb7daa 2927 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2928 /* FIXME - should this return an error rather than just fail? */
2929 return;
72fb7daa 2930 memcpy(buf, p, l);
5b257578 2931 unlock_user(p, addr, 0);
13eb76e0
FB
2932 }
2933 len -= l;
2934 buf += l;
2935 addr += l;
2936 }
2937}
8df1cd07 2938
13eb76e0 2939#else
5fafdf24 2940void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2941 int len, int is_write)
2942{
2943 int l, io_index;
2944 uint8_t *ptr;
2945 uint32_t val;
2e12669a
FB
2946 target_phys_addr_t page;
2947 unsigned long pd;
92e873b9 2948 PhysPageDesc *p;
3b46e624 2949
13eb76e0
FB
2950 while (len > 0) {
2951 page = addr & TARGET_PAGE_MASK;
2952 l = (page + TARGET_PAGE_SIZE) - addr;
2953 if (l > len)
2954 l = len;
92e873b9 2955 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2956 if (!p) {
2957 pd = IO_MEM_UNASSIGNED;
2958 } else {
2959 pd = p->phys_offset;
2960 }
3b46e624 2961
13eb76e0 2962 if (is_write) {
3a7d929e 2963 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2964 target_phys_addr_t addr1 = addr;
13eb76e0 2965 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2966 if (p)
6c2934db 2967 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2968 /* XXX: could force cpu_single_env to NULL to avoid
2969 potential bugs */
6c2934db 2970 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2971 /* 32 bit write access */
c27004ec 2972 val = ldl_p(buf);
6c2934db 2973 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2974 l = 4;
6c2934db 2975 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2976 /* 16 bit write access */
c27004ec 2977 val = lduw_p(buf);
6c2934db 2978 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2979 l = 2;
2980 } else {
1c213d19 2981 /* 8 bit write access */
c27004ec 2982 val = ldub_p(buf);
6c2934db 2983 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2984 l = 1;
2985 }
2986 } else {
b448f2f3
FB
2987 unsigned long addr1;
2988 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2989 /* RAM case */
b448f2f3 2990 ptr = phys_ram_base + addr1;
13eb76e0 2991 memcpy(ptr, buf, l);
3a7d929e
FB
2992 if (!cpu_physical_memory_is_dirty(addr1)) {
2993 /* invalidate code */
2994 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2995 /* set dirty bit */
5fafdf24 2996 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2997 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2998 }
13eb76e0
FB
2999 }
3000 } else {
5fafdf24 3001 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3002 !(pd & IO_MEM_ROMD)) {
6c2934db 3003 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3004 /* I/O case */
3005 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3006 if (p)
6c2934db
AJ
3007 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3008 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3009 /* 32 bit read access */
6c2934db 3010 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3011 stl_p(buf, val);
13eb76e0 3012 l = 4;
6c2934db 3013 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3014 /* 16 bit read access */
6c2934db 3015 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3016 stw_p(buf, val);
13eb76e0
FB
3017 l = 2;
3018 } else {
1c213d19 3019 /* 8 bit read access */
6c2934db 3020 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3021 stb_p(buf, val);
13eb76e0
FB
3022 l = 1;
3023 }
3024 } else {
3025 /* RAM case */
5fafdf24 3026 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3027 (addr & ~TARGET_PAGE_MASK);
3028 memcpy(buf, ptr, l);
3029 }
3030 }
3031 len -= l;
3032 buf += l;
3033 addr += l;
3034 }
3035}
8df1cd07 3036
d0ecd2aa 3037/* used for ROM loading: can write in RAM and ROM */
5fafdf24 3038void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3039 const uint8_t *buf, int len)
3040{
3041 int l;
3042 uint8_t *ptr;
3043 target_phys_addr_t page;
3044 unsigned long pd;
3045 PhysPageDesc *p;
3b46e624 3046
d0ecd2aa
FB
3047 while (len > 0) {
3048 page = addr & TARGET_PAGE_MASK;
3049 l = (page + TARGET_PAGE_SIZE) - addr;
3050 if (l > len)
3051 l = len;
3052 p = phys_page_find(page >> TARGET_PAGE_BITS);
3053 if (!p) {
3054 pd = IO_MEM_UNASSIGNED;
3055 } else {
3056 pd = p->phys_offset;
3057 }
3b46e624 3058
d0ecd2aa 3059 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3060 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3061 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3062 /* do nothing */
3063 } else {
3064 unsigned long addr1;
3065 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3066 /* ROM/RAM case */
3067 ptr = phys_ram_base + addr1;
3068 memcpy(ptr, buf, l);
3069 }
3070 len -= l;
3071 buf += l;
3072 addr += l;
3073 }
3074}
3075
6d16c2f8
AL
3076typedef struct {
3077 void *buffer;
3078 target_phys_addr_t addr;
3079 target_phys_addr_t len;
3080} BounceBuffer;
3081
3082static BounceBuffer bounce;
3083
ba223c29
AL
3084typedef struct MapClient {
3085 void *opaque;
3086 void (*callback)(void *opaque);
3087 LIST_ENTRY(MapClient) link;
3088} MapClient;
3089
3090static LIST_HEAD(map_client_list, MapClient) map_client_list
3091 = LIST_HEAD_INITIALIZER(map_client_list);
3092
3093void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3094{
3095 MapClient *client = qemu_malloc(sizeof(*client));
3096
3097 client->opaque = opaque;
3098 client->callback = callback;
3099 LIST_INSERT_HEAD(&map_client_list, client, link);
3100 return client;
3101}
3102
3103void cpu_unregister_map_client(void *_client)
3104{
3105 MapClient *client = (MapClient *)_client;
3106
3107 LIST_REMOVE(client, link);
3108}
3109
3110static void cpu_notify_map_clients(void)
3111{
3112 MapClient *client;
3113
3114 while (!LIST_EMPTY(&map_client_list)) {
3115 client = LIST_FIRST(&map_client_list);
3116 client->callback(client->opaque);
3117 LIST_REMOVE(client, link);
3118 }
3119}
3120
6d16c2f8
AL
3121/* Map a physical memory region into a host virtual address.
3122 * May map a subset of the requested range, given by and returned in *plen.
3123 * May return NULL if resources needed to perform the mapping are exhausted.
3124 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3125 * Use cpu_register_map_client() to know when retrying the map operation is
3126 * likely to succeed.
6d16c2f8
AL
3127 */
3128void *cpu_physical_memory_map(target_phys_addr_t addr,
3129 target_phys_addr_t *plen,
3130 int is_write)
3131{
3132 target_phys_addr_t len = *plen;
3133 target_phys_addr_t done = 0;
3134 int l;
3135 uint8_t *ret = NULL;
3136 uint8_t *ptr;
3137 target_phys_addr_t page;
3138 unsigned long pd;
3139 PhysPageDesc *p;
3140 unsigned long addr1;
3141
3142 while (len > 0) {
3143 page = addr & TARGET_PAGE_MASK;
3144 l = (page + TARGET_PAGE_SIZE) - addr;
3145 if (l > len)
3146 l = len;
3147 p = phys_page_find(page >> TARGET_PAGE_BITS);
3148 if (!p) {
3149 pd = IO_MEM_UNASSIGNED;
3150 } else {
3151 pd = p->phys_offset;
3152 }
3153
3154 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3155 if (done || bounce.buffer) {
3156 break;
3157 }
3158 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3159 bounce.addr = addr;
3160 bounce.len = l;
3161 if (!is_write) {
3162 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3163 }
3164 ptr = bounce.buffer;
3165 } else {
3166 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3167 ptr = phys_ram_base + addr1;
3168 }
3169 if (!done) {
3170 ret = ptr;
3171 } else if (ret + done != ptr) {
3172 break;
3173 }
3174
3175 len -= l;
3176 addr += l;
3177 done += l;
3178 }
3179 *plen = done;
3180 return ret;
3181}
3182
3183/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3184 * Will also mark the memory as dirty if is_write == 1. access_len gives
3185 * the amount of memory that was actually read or written by the caller.
3186 */
3187void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3188 int is_write, target_phys_addr_t access_len)
3189{
3190 if (buffer != bounce.buffer) {
3191 if (is_write) {
3192 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3193 while (access_len) {
3194 unsigned l;
3195 l = TARGET_PAGE_SIZE;
3196 if (l > access_len)
3197 l = access_len;
3198 if (!cpu_physical_memory_is_dirty(addr1)) {
3199 /* invalidate code */
3200 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3201 /* set dirty bit */
3202 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3203 (0xff & ~CODE_DIRTY_FLAG);
3204 }
3205 addr1 += l;
3206 access_len -= l;
3207 }
3208 }
3209 return;
3210 }
3211 if (is_write) {
3212 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3213 }
3214 qemu_free(bounce.buffer);
3215 bounce.buffer = NULL;
ba223c29 3216 cpu_notify_map_clients();
6d16c2f8 3217}
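/* Editor's sketch, not part of exec.c: the intended map/unmap pattern
   for DMA-style transfers.  Only one bounce buffer exists, so a NULL
   return must be handled (cpu_register_map_client() can be used to be
   told when a retry is worthwhile); here we simply fall back to the
   slow path through cpu_physical_memory_rw().  The function name and
   its arguments are made up. */
static void example_dma_write(target_phys_addr_t guest_addr,
                              const uint8_t *data,
                              target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t len = size;
        void *host = cpu_physical_memory_map(guest_addr, &len, 1);

        if (!host) {
            /* mapping resources exhausted: copy page by page instead */
            cpu_physical_memory_rw(guest_addr, (uint8_t *)data, size, 1);
            return;
        }
        memcpy(host, data, len);           /* len may be less than size */
        cpu_physical_memory_unmap(host, len, 1, len);
        guest_addr += len;
        data += len;
        size -= len;
    }
}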
d0ecd2aa 3218
8df1cd07
FB
3219/* warning: addr must be aligned */
3220uint32_t ldl_phys(target_phys_addr_t addr)
3221{
3222 int io_index;
3223 uint8_t *ptr;
3224 uint32_t val;
3225 unsigned long pd;
3226 PhysPageDesc *p;
3227
3228 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3229 if (!p) {
3230 pd = IO_MEM_UNASSIGNED;
3231 } else {
3232 pd = p->phys_offset;
3233 }
3b46e624 3234
5fafdf24 3235 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3236 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3237 /* I/O case */
3238 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3239 if (p)
3240 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3241 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3242 } else {
3243 /* RAM case */
5fafdf24 3244 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3245 (addr & ~TARGET_PAGE_MASK);
3246 val = ldl_p(ptr);
3247 }
3248 return val;
3249}
3250
84b7b8e7
FB
3251/* warning: addr must be aligned */
3252uint64_t ldq_phys(target_phys_addr_t addr)
3253{
3254 int io_index;
3255 uint8_t *ptr;
3256 uint64_t val;
3257 unsigned long pd;
3258 PhysPageDesc *p;
3259
3260 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3261 if (!p) {
3262 pd = IO_MEM_UNASSIGNED;
3263 } else {
3264 pd = p->phys_offset;
3265 }
3b46e624 3266
2a4188a3
FB
3267 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3268 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3269 /* I/O case */
3270 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3271 if (p)
3272 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3273#ifdef TARGET_WORDS_BIGENDIAN
3274 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3275 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3276#else
3277 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3278 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3279#endif
3280 } else {
3281 /* RAM case */
5fafdf24 3282 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3283 (addr & ~TARGET_PAGE_MASK);
3284 val = ldq_p(ptr);
3285 }
3286 return val;
3287}
3288
aab33094
FB
3289/* XXX: optimize */
3290uint32_t ldub_phys(target_phys_addr_t addr)
3291{
3292 uint8_t val;
3293 cpu_physical_memory_read(addr, &val, 1);
3294 return val;
3295}
3296
3297/* XXX: optimize */
3298uint32_t lduw_phys(target_phys_addr_t addr)
3299{
3300 uint16_t val;
3301 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3302 return tswap16(val);
3303}
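/* Editor's sketch, not part of exec.c: the ld*_phys helpers above are
   the usual way for device models to read fields out of in-guest
   structures.  The descriptor layout and its base address are made up;
   note the "addr must be aligned" warnings on ldl_phys/ldq_phys, hence
   the naturally aligned offsets. */
static void example_read_descriptor(target_phys_addr_t desc_paddr,
                                    uint32_t *flags, uint64_t *buf_addr)
{
    *flags    = ldl_phys(desc_paddr);       /* 32-bit field at offset 0 */
    *buf_addr = ldq_phys(desc_paddr + 8);   /* 64-bit field at offset 8 */
}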
3304
8df1cd07
FB
3305/* warning: addr must be aligned. The ram page is not masked as dirty
3306 and the code inside is not invalidated. It is useful if the dirty
3307 bits are used to track modified PTEs */
3308void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3309{
3310 int io_index;
3311 uint8_t *ptr;
3312 unsigned long pd;
3313 PhysPageDesc *p;
3314
3315 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3316 if (!p) {
3317 pd = IO_MEM_UNASSIGNED;
3318 } else {
3319 pd = p->phys_offset;
3320 }
3b46e624 3321
3a7d929e 3322 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3323 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3324 if (p)
3325 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3326 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3327 } else {
74576198
AL
3328 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3329 ptr = phys_ram_base + addr1;
8df1cd07 3330 stl_p(ptr, val);
74576198
AL
3331
3332 if (unlikely(in_migration)) {
3333 if (!cpu_physical_memory_is_dirty(addr1)) {
3334 /* invalidate code */
3335 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3336 /* set dirty bit */
3337 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3338 (0xff & ~CODE_DIRTY_FLAG);
3339 }
3340 }
8df1cd07
FB
3341 }
3342}
3343
bc98a7ef
JM
3344void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3345{
3346 int io_index;
3347 uint8_t *ptr;
3348 unsigned long pd;
3349 PhysPageDesc *p;
3350
3351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3352 if (!p) {
3353 pd = IO_MEM_UNASSIGNED;
3354 } else {
3355 pd = p->phys_offset;
3356 }
3b46e624 3357
bc98a7ef
JM
3358 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3359 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3360 if (p)
3361 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3362#ifdef TARGET_WORDS_BIGENDIAN
3363 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3364 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3365#else
3366 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3367 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3368#endif
3369 } else {
5fafdf24 3370 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3371 (addr & ~TARGET_PAGE_MASK);
3372 stq_p(ptr, val);
3373 }
3374}
3375
8df1cd07 3376/* warning: addr must be aligned */
8df1cd07
FB
3377void stl_phys(target_phys_addr_t addr, uint32_t val)
3378{
3379 int io_index;
3380 uint8_t *ptr;
3381 unsigned long pd;
3382 PhysPageDesc *p;
3383
3384 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3385 if (!p) {
3386 pd = IO_MEM_UNASSIGNED;
3387 } else {
3388 pd = p->phys_offset;
3389 }
3b46e624 3390
3a7d929e 3391 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3392 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3393 if (p)
3394 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3395 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3396 } else {
3397 unsigned long addr1;
3398 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3399 /* RAM case */
3400 ptr = phys_ram_base + addr1;
3401 stl_p(ptr, val);
3a7d929e
FB
3402 if (!cpu_physical_memory_is_dirty(addr1)) {
3403 /* invalidate code */
3404 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3405 /* set dirty bit */
f23db169
FB
3406 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3407 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3408 }
8df1cd07
FB
3409 }
3410}
3411
aab33094
FB
3412/* XXX: optimize */
3413void stb_phys(target_phys_addr_t addr, uint32_t val)
3414{
3415 uint8_t v = val;
3416 cpu_physical_memory_write(addr, &v, 1);
3417}
3418
3419/* XXX: optimize */
3420void stw_phys(target_phys_addr_t addr, uint32_t val)
3421{
3422 uint16_t v = tswap16(val);
3423 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3424}
3425
3426/* XXX: optimize */
3427void stq_phys(target_phys_addr_t addr, uint64_t val)
3428{
3429 val = tswap64(val);
3430 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3431}
3432
13eb76e0
FB
3433#endif
3434
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
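/* Illustrative example (hypothetical helper, not part of exec.c): a debug
 * front end such as a gdb stub can use cpu_memory_rw_debug() to peek at
 * guest virtual memory through the guest's current page tables. */
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    /* is_write == 0: read four bytes at the guest virtual address */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;              /* no physical page mapped at vaddr */
    *out = ldl_p(buf);          /* interpret the bytes in target byte order */
    return 0;
}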

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
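/* Worked example of the icount arithmetic above (illustrative numbers only):
 * say the TB was translated with tb->icount = 10 and the whole count was
 * charged at TB entry, leaving env->icount_decr.u16.low = 6 when the I/O
 * access traps.  Then n = 6 + 10 = 16 reconstructs the budget at TB entry.
 * Assuming only 4 of the 10 instructions actually ran before the I/O insn,
 * cpu_restore_state() credits the 6 unexecuted ones back (low becomes 12),
 * so n - low = 4 instructions completed, and n++ = 5 makes the regenerated
 * TB end exactly on the I/O instruction (cflags = 5 | CF_LAST_IO). */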

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
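/* Illustrative example (hypothetical helper, not part of exec.c):
 * dump_exec_info() takes any FILE* / printf-style pair matching the
 * signature above, so plain fprintf to stderr works for quick debugging,
 * independently of the monitor hookup. */
static void dump_exec_info_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}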

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
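/* Roughly speaking, each inclusion of softmmu_template.h above expands into a
 * slow-path accessor for one access size, selected by SHIFT (0 = 1 byte,
 * 1 = 2, 2 = 4, 3 = 8 bytes).  With SOFTMMU_CODE_ACCESS defined these are the
 * code-fetch (_cmmu) variants used when guest instructions are read for
 * translation, and GETPC() is NULL because there is no translated-code return
 * address to unwind from in that context. */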