/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

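/* A quick sanity check of the two-level table arithmetic, assuming the
   common case of 4 KiB target pages (TARGET_PAGE_BITS == 12): L1_BITS is
   then 32 - 10 - 12 = 10, so l1_map has 1 << 10 = 1024 slots, each
   pointing to a second-level block of 1 << 10 = 1024 PageDesc entries;
   together they index the 2^20 pages of a full 32-bit virtual space. */
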
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

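    /* User-mode only: scan /proc/self/maps and flag every page the host
       process already has mapped as PAGE_RESERVED, so that later guest
       mmap() allocations are steered away from the host's own mappings. */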
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

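/* Note: the 'index' taken by page_find()/page_find_alloc() is a virtual
   page number, i.e. a target address already shifted right by
   TARGET_PAGE_BITS by the caller. */
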
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

379
9fa3e853 380#if !defined(CONFIG_USER_ONLY)
6a00d601 381static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 382static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 383 target_ulong vaddr);
c8a706fe
PB
384#define mmap_lock() do { } while(0)
385#define mmap_unlock() do { } while(0)
9fa3e853 386#endif
fd6ce8f6 387
4369415f
FB
388#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
389
390#if defined(CONFIG_USER_ONLY)
391/* Currently it is not recommanded to allocate big chunks of data in
392 user mode. It will change when a dedicated libc will be used */
393#define USE_STATIC_CODE_GEN_BUFFER
394#endif
395
396#ifdef USE_STATIC_CODE_GEN_BUFFER
397static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
398#endif
399
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

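/* The page lists and jump lists below store a small tag in the low two
   bits of each TranslationBlock pointer: in first_tb/page_next[] the tag
   is the index (0 or 1) of the TB page the link belongs to, and in the
   circular jmp_first/jmp_next list it selects the jump slot, with tag 2
   marking the list head (the TB itself).  Pointers must therefore be
   masked with ~3 before being dereferenced. */
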
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

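/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs 0xf8 into
   tab[0] (bits 3-7) and 0x03 into tab[1] (bits 8-9). */
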
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

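/* The bitmap built above holds one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes in total); a set bit means the byte belongs
   to translated code, which is what tb_invalidate_phys_page_fast tests. */
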
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

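/* Note on the cflags argument: passing 1 sets (cflags & CF_COUNT_MASK)
   to 1, which requests a block of at most one guest instruction; the SMC
   handling below relies on this to re-execute just the store that
   modified the current TB. */
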
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

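/* The binary search in tb_find_pc relies on tbs[] being filled in order
   of increasing tc_ptr: code_gen_ptr only grows between flushes and
   tb_alloc hands out tbs[] entries sequentially. */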
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

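/* Worked example: len = 4 gives len_mask = ~3, so the (addr & ~len_mask)
   check above rejects any address that is not 4-byte aligned; the stored
   len_mask is what later watchpoint-hit checks use to compare addresses
   at the watched granularity. */
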
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

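/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry in
   cpu_log_items, and an unknown name makes the whole call return 0. */
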
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The list heads were copied by the memcpy above, so reset them in
       the new env before re-inserting the entries.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&new_env->breakpoints);
    TAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#endif
#if (NB_MMU_MODES >= 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#if (NB_MMU_MODES >= 5)
        env->tlb_table[4][i].addr_read = -1;
        env->tlb_table[4][i].addr_write = -1;
        env->tlb_table[4][i].addr_code = -1;
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

274da6b2 1761static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1762{
5fafdf24 1763 if (addr == (tlb_entry->addr_read &
84b7b8e7 1764 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1765 addr == (tlb_entry->addr_write &
84b7b8e7 1766 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1767 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1768 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769 tlb_entry->addr_read = -1;
1770 tlb_entry->addr_write = -1;
1771 tlb_entry->addr_code = -1;
1772 }
61382a50
FB
1773}
1774
2e12669a 1775void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1776{
8a40a180 1777 int i;
0124311e 1778
9fa3e853 1779#if defined(DEBUG_TLB)
108c49b8 1780 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1781#endif
0124311e
FB
1782 /* must reset current TB so that interrupts cannot modify the
1783 links while we are modifying them */
1784 env->current_tb = NULL;
61382a50
FB
1785
1786 addr &= TARGET_PAGE_MASK;
1787 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1788 tlb_flush_entry(&env->tlb_table[0][i], addr);
1789 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1790#if (NB_MMU_MODES >= 3)
1791 tlb_flush_entry(&env->tlb_table[2][i], addr);
e37e6ee6
AJ
1792#endif
1793#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1794 tlb_flush_entry(&env->tlb_table[3][i], addr);
1795#endif
e37e6ee6
AJ
1796#if (NB_MMU_MODES >= 5)
1797 tlb_flush_entry(&env->tlb_table[4][i], addr);
6fa4cea9 1798#endif
0124311e 1799
5c751e99 1800 tlb_flush_jmp_cache(env, addr);
9fa3e853 1801
0a962c02
FB
1802#ifdef USE_KQEMU
1803 if (env->kqemu_enabled) {
1804 kqemu_flush_page(env, addr);
1805 }
1806#endif
9fa3e853
FB
1807}
1808
9fa3e853
FB
1809/* update the TLBs so that writes to code in the virtual page 'addr'
1810 can be detected */
6a00d601 1811static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1812{
5fafdf24 1813 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1814 ram_addr + TARGET_PAGE_SIZE,
1815 CODE_DIRTY_FLAG);
9fa3e853
FB
1816}
1817
9fa3e853 1818/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1819 tested for self modifying code */
5fafdf24 1820static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1821 target_ulong vaddr)
9fa3e853 1822{
3a7d929e 1823 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1824}
1825
5fafdf24 1826static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1827 unsigned long start, unsigned long length)
1828{
1829 unsigned long addr;
84b7b8e7
FB
1830 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1832 if ((addr - start) < length) {
0f459d16 1833 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1834 }
1835 }
1836}
1837
3a7d929e 1838void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1839 int dirty_flags)
1ccde1cb
FB
1840{
1841 CPUState *env;
4f2ac237 1842 unsigned long length, start1;
0a962c02
FB
1843 int i, mask, len;
1844 uint8_t *p;
1ccde1cb
FB
1845
1846 start &= TARGET_PAGE_MASK;
1847 end = TARGET_PAGE_ALIGN(end);
1848
1849 length = end - start;
1850 if (length == 0)
1851 return;
0a962c02 1852 len = length >> TARGET_PAGE_BITS;
3a7d929e 1853#ifdef USE_KQEMU
6a00d601
FB
1854 /* XXX: should not depend on cpu context */
1855 env = first_cpu;
3a7d929e 1856 if (env->kqemu_enabled) {
f23db169
FB
1857 ram_addr_t addr;
1858 addr = start;
1859 for(i = 0; i < len; i++) {
1860 kqemu_set_notdirty(env, addr);
1861 addr += TARGET_PAGE_SIZE;
1862 }
3a7d929e
FB
1863 }
1864#endif
f23db169
FB
1865 mask = ~dirty_flags;
1866 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1867 for(i = 0; i < len; i++)
1868 p[i] &= mask;
1869
1ccde1cb
FB
1870 /* we modify the TLB cache so that the dirty bit will be set again
1871 when accessing the range */
59817ccb 1872 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1873 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1875 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1876 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1877 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1878#if (NB_MMU_MODES >= 3)
1879 for(i = 0; i < CPU_TLB_SIZE; i++)
1880 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
e37e6ee6
AJ
1881#endif
1882#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
1884 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1885#endif
e37e6ee6
AJ
1886#if (NB_MMU_MODES >= 5)
1887 for(i = 0; i < CPU_TLB_SIZE; i++)
1888 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
6fa4cea9 1889#endif
6a00d601 1890 }
1ccde1cb
FB
1891}
1892
74576198
AL
1893int cpu_physical_memory_set_dirty_tracking(int enable)
1894{
1895 in_migration = enable;
1896 return 0;
1897}
1898
1899int cpu_physical_memory_get_dirty_tracking(void)
1900{
1901 return in_migration;
1902}
1903
2bec46dc
AL
1904void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1905{
1906 if (kvm_enabled())
1907 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1908}
1909
3a7d929e
FB
1910static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1911{
1912 ram_addr_t ram_addr;
1913
84b7b8e7 1914 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1915 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1916 tlb_entry->addend - (unsigned long)phys_ram_base;
1917 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1918 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1919 }
1920 }
1921}
1922
1923/* update the TLB according to the current state of the dirty bits */
1924void cpu_tlb_update_dirty(CPUState *env)
1925{
1926 int i;
1927 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1928 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1929 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1930 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1931#if (NB_MMU_MODES >= 3)
1932 for(i = 0; i < CPU_TLB_SIZE; i++)
1933 tlb_update_dirty(&env->tlb_table[2][i]);
e37e6ee6
AJ
1934#endif
1935#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1936 for(i = 0; i < CPU_TLB_SIZE; i++)
1937 tlb_update_dirty(&env->tlb_table[3][i]);
1938#endif
e37e6ee6
AJ
1939#if (NB_MMU_MODES >= 5)
1940 for(i = 0; i < CPU_TLB_SIZE; i++)
1941 tlb_update_dirty(&env->tlb_table[4][i]);
6fa4cea9 1942#endif
3a7d929e
FB
1943}
1944
0f459d16 1945static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1946{
0f459d16
PB
1947 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1948 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1949}
1950
0f459d16
PB
1951/* update the TLB corresponding to virtual page vaddr
1952 so that it is no longer dirty */
1953static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1954{
1ccde1cb
FB
1955 int i;
1956
0f459d16 1957 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1958 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1959 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1960 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1961#if (NB_MMU_MODES >= 3)
0f459d16 1962 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
e37e6ee6
AJ
1963#endif
1964#if (NB_MMU_MODES >= 4)
0f459d16 1965 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9 1966#endif
e37e6ee6
AJ
1967#if (NB_MMU_MODES >= 5)
1968 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
6fa4cea9 1969#endif
9fa3e853
FB
1970}
1971
59817ccb
FB
1972/* add a new TLB entry. At most one entry for a given virtual address
1973 is permitted. Return 0 if OK or 2 if the page could not be mapped
1974 (can only happen in non SOFTMMU mode for I/O pages or pages
1975 conflicting with the host address space). */
5fafdf24
TS
1976int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977 target_phys_addr_t paddr, int prot,
6ebbf390 1978 int mmu_idx, int is_softmmu)
9fa3e853 1979{
92e873b9 1980 PhysPageDesc *p;
4f2ac237 1981 unsigned long pd;
9fa3e853 1982 unsigned int index;
4f2ac237 1983 target_ulong address;
0f459d16 1984 target_ulong code_address;
108c49b8 1985 target_phys_addr_t addend;
9fa3e853 1986 int ret;
84b7b8e7 1987 CPUTLBEntry *te;
a1d1bb31 1988 CPUWatchpoint *wp;
0f459d16 1989 target_phys_addr_t iotlb;
9fa3e853 1990
92e873b9 1991 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1992 if (!p) {
1993 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1994 } else {
1995 pd = p->phys_offset;
9fa3e853
FB
1996 }
1997#if defined(DEBUG_TLB)
6ebbf390
JM
1998 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1999 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2000#endif
2001
2002 ret = 0;
0f459d16
PB
2003 address = vaddr;
2004 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2005 /* IO memory case (romd handled later) */
2006 address |= TLB_MMIO;
2007 }
2008 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2009 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2010 /* Normal RAM. */
2011 iotlb = pd & TARGET_PAGE_MASK;
2012 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2013 iotlb |= IO_MEM_NOTDIRTY;
2014 else
2015 iotlb |= IO_MEM_ROM;
2016 } else {
2017    /* IO handlers are currently passed a physical address.
2018 It would be nice to pass an offset from the base address
2019 of that region. This would avoid having to special case RAM,
2020 and avoid full address decoding in every device.
2021 We can't use the high bits of pd for this because
2022 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2023 iotlb = (pd & ~TARGET_PAGE_MASK);
2024 if (p) {
8da3ff18
PB
2025 iotlb += p->region_offset;
2026 } else {
2027 iotlb += paddr;
2028 }
0f459d16
PB
2029 }
2030
2031 code_address = address;
2032 /* Make accesses to pages with watchpoints go via the
2033 watchpoint trap routines. */
c0ce998e 2034 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2035 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2036 iotlb = io_mem_watch + paddr;
2037 /* TODO: The memory case can be optimized by not trapping
2038 reads of pages with a write breakpoint. */
2039 address |= TLB_MMIO;
6658ffb8 2040 }
0f459d16 2041 }
d79acba4 2042
0f459d16
PB
2043 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2044 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2045 te = &env->tlb_table[mmu_idx][index];
2046 te->addend = addend - vaddr;
2047 if (prot & PAGE_READ) {
2048 te->addr_read = address;
2049 } else {
2050 te->addr_read = -1;
2051 }
5c751e99 2052
0f459d16
PB
2053 if (prot & PAGE_EXEC) {
2054 te->addr_code = code_address;
2055 } else {
2056 te->addr_code = -1;
2057 }
2058 if (prot & PAGE_WRITE) {
2059 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2060 (pd & IO_MEM_ROMD)) {
2061 /* Write access calls the I/O callback. */
2062 te->addr_write = address | TLB_MMIO;
2063 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2064 !cpu_physical_memory_is_dirty(pd)) {
2065 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2066 } else {
0f459d16 2067 te->addr_write = address;
9fa3e853 2068 }
0f459d16
PB
2069 } else {
2070 te->addr_write = -1;
9fa3e853 2071 }
9fa3e853
FB
2072 return ret;
2073}
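/* Illustrative sketch: a target's tlb_fill() handler typically resolves
   the guest MMU translation and then installs it with
   tlb_set_page_exec().  The flat identity mapping and the hypothetical_
   name below are assumptions, not real target code. */
static int hypothetical_tlb_fill(CPUState *env, target_ulong vaddr,
                                 int is_write, int mmu_idx, int is_softmmu)
{
    /* Flat target: physical == virtual, every page fully accessible. */
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                             mmu_idx, is_softmmu);
}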
2074
0124311e
FB
2075#else
2076
ee8b7021 2077void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2078{
2079}
2080
2e12669a 2081void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2082{
2083}
2084
5fafdf24
TS
2085int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2086 target_phys_addr_t paddr, int prot,
6ebbf390 2087 int mmu_idx, int is_softmmu)
9fa3e853
FB
2088{
2089 return 0;
2090}
0124311e 2091
9fa3e853
FB
2092/* dump memory mappings */
2093void page_dump(FILE *f)
33417e70 2094{
9fa3e853
FB
2095 unsigned long start, end;
2096 int i, j, prot, prot1;
2097 PageDesc *p;
33417e70 2098
9fa3e853
FB
2099 fprintf(f, "%-8s %-8s %-8s %s\n",
2100 "start", "end", "size", "prot");
2101 start = -1;
2102 end = -1;
2103 prot = 0;
2104 for(i = 0; i <= L1_SIZE; i++) {
2105 if (i < L1_SIZE)
2106 p = l1_map[i];
2107 else
2108 p = NULL;
2109 for(j = 0;j < L2_SIZE; j++) {
2110 if (!p)
2111 prot1 = 0;
2112 else
2113 prot1 = p[j].flags;
2114 if (prot1 != prot) {
2115 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2116 if (start != -1) {
2117 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2118 start, end, end - start,
9fa3e853
FB
2119 prot & PAGE_READ ? 'r' : '-',
2120 prot & PAGE_WRITE ? 'w' : '-',
2121 prot & PAGE_EXEC ? 'x' : '-');
2122 }
2123 if (prot1 != 0)
2124 start = end;
2125 else
2126 start = -1;
2127 prot = prot1;
2128 }
2129 if (!p)
2130 break;
2131 }
33417e70 2132 }
33417e70
FB
2133}
2134
53a5960a 2135int page_get_flags(target_ulong address)
33417e70 2136{
9fa3e853
FB
2137 PageDesc *p;
2138
2139 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2140 if (!p)
9fa3e853
FB
2141 return 0;
2142 return p->flags;
2143}
2144
2145/* modify the flags of a page and invalidate the code if
2146   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2147 depending on PAGE_WRITE */
53a5960a 2148void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2149{
2150 PageDesc *p;
53a5960a 2151 target_ulong addr;
9fa3e853 2152
c8a706fe 2153 /* mmap_lock should already be held. */
9fa3e853
FB
2154 start = start & TARGET_PAGE_MASK;
2155 end = TARGET_PAGE_ALIGN(end);
2156 if (flags & PAGE_WRITE)
2157 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2158 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2159 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2160 /* We may be called for host regions that are outside guest
2161 address space. */
2162 if (!p)
2163 return;
9fa3e853
FB
2164 /* if the write protection is set, then we invalidate the code
2165 inside */
5fafdf24 2166 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2167 (flags & PAGE_WRITE) &&
2168 p->first_tb) {
d720b93d 2169 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2170 }
2171 p->flags = flags;
2172 }
33417e70
FB
2173}
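/* Illustrative sketch: an mmap() emulation would mark freshly mapped
   guest pages roughly like this; the hypothetical_ name is an
   assumption.  PAGE_WRITE implicitly sets PAGE_WRITE_ORG above, so the
   page can later be write-protected to guard translated code and
   restored on demand. */
static void hypothetical_mark_mapped(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}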
2174
3d97b40b
TS
2175int page_check_range(target_ulong start, target_ulong len, int flags)
2176{
2177 PageDesc *p;
2178 target_ulong end;
2179 target_ulong addr;
2180
55f280c9
AZ
2181 if (start + len < start)
2182 /* we've wrapped around */
2183 return -1;
2184
3d97b40b
TS
2185    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2186 start = start & TARGET_PAGE_MASK;
2187
3d97b40b
TS
2188 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2189 p = page_find(addr >> TARGET_PAGE_BITS);
2190 if( !p )
2191 return -1;
2192 if( !(p->flags & PAGE_VALID) )
2193 return -1;
2194
dae3270c 2195 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2196 return -1;
dae3270c
FB
2197 if (flags & PAGE_WRITE) {
2198 if (!(p->flags & PAGE_WRITE_ORG))
2199 return -1;
2200 /* unprotect the page if it was put read-only because it
2201 contains translated code */
2202 if (!(p->flags & PAGE_WRITE)) {
2203 if (!page_unprotect(addr, 0, NULL))
2204 return -1;
2205 }
2206 return 0;
2207 }
3d97b40b
TS
2208 }
2209 return 0;
2210}
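/* Illustrative sketch: syscall emulation can validate a guest buffer
   before copying through it; the hypothetical_ name is an assumption.
   Note that page_check_range() also un-write-protects pages that were
   made read-only to guard translated code. */
static int hypothetical_access_ok(target_ulong guest_addr, target_ulong len,
                                  int is_write)
{
    return page_check_range(guest_addr, len,
                            is_write ? PAGE_WRITE : PAGE_READ) == 0;
}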
2211
9fa3e853
FB
2212/* called from signal handler: invalidate the code and unprotect the
2213   page. Return TRUE if the fault was successfully handled. */
53a5960a 2214int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2215{
2216 unsigned int page_index, prot, pindex;
2217 PageDesc *p, *p1;
53a5960a 2218 target_ulong host_start, host_end, addr;
9fa3e853 2219
c8a706fe
PB
2220 /* Technically this isn't safe inside a signal handler. However we
2221 know this only ever happens in a synchronous SEGV handler, so in
2222 practice it seems to be ok. */
2223 mmap_lock();
2224
83fb7adf 2225 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2226 page_index = host_start >> TARGET_PAGE_BITS;
2227 p1 = page_find(page_index);
c8a706fe
PB
2228 if (!p1) {
2229 mmap_unlock();
9fa3e853 2230 return 0;
c8a706fe 2231 }
83fb7adf 2232 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2233 p = p1;
2234 prot = 0;
2235 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2236 prot |= p->flags;
2237 p++;
2238 }
2239 /* if the page was really writable, then we change its
2240 protection back to writable */
2241 if (prot & PAGE_WRITE_ORG) {
2242 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2243 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2244 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2245 (prot & PAGE_BITS) | PAGE_WRITE);
2246 p1[pindex].flags |= PAGE_WRITE;
2247 /* and since the content will be modified, we must invalidate
2248 the corresponding translated code. */
d720b93d 2249 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2250#ifdef DEBUG_TB_CHECK
2251 tb_invalidate_check(address);
2252#endif
c8a706fe 2253 mmap_unlock();
9fa3e853
FB
2254 return 1;
2255 }
2256 }
c8a706fe 2257 mmap_unlock();
9fa3e853
FB
2258 return 0;
2259}
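/* Illustrative sketch: the host SIGSEGV handler ends up doing roughly
   this for faults caused by self-modifying guest code.  h2g() is the
   usual host-to-guest address conversion; the hypothetical_ name is an
   assumption.  A return of 1 means the write protection was ours and
   the faulting instruction can simply be restarted. */
static int hypothetical_handle_segv(unsigned long host_addr,
                                    unsigned long pc, void *puc)
{
    return page_unprotect(h2g(host_addr), pc, puc);
}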
2260
6a00d601
FB
2261static inline void tlb_set_dirty(CPUState *env,
2262 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2263{
2264}
9fa3e853
FB
2265#endif /* defined(CONFIG_USER_ONLY) */
2266
e2eef170 2267#if !defined(CONFIG_USER_ONLY)
8da3ff18 2268
db7b5426 2269static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2270 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2271static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2272 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2273#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2274 need_subpage) \
2275 do { \
2276 if (addr > start_addr) \
2277 start_addr2 = 0; \
2278 else { \
2279 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2280 if (start_addr2 > 0) \
2281 need_subpage = 1; \
2282 } \
2283 \
49e9fba2 2284 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2285 end_addr2 = TARGET_PAGE_SIZE - 1; \
2286 else { \
2287 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2288 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2289 need_subpage = 1; \
2290 } \
2291 } while (0)
2292
33417e70
FB
2293/* register physical memory. 'size' must be a multiple of the target
2294 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2295 io memory page. The address used when calling the IO function is
2296 the offset from the start of the region, plus region_offset. Both
2297   start_addr and region_offset are rounded down to a page boundary
2298 before calculating this offset. This should not be a problem unless
2299 the low bits of start_addr and region_offset differ. */
2300void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2301 ram_addr_t size,
2302 ram_addr_t phys_offset,
2303 ram_addr_t region_offset)
33417e70 2304{
108c49b8 2305 target_phys_addr_t addr, end_addr;
92e873b9 2306 PhysPageDesc *p;
9d42037b 2307 CPUState *env;
00f82b8a 2308 ram_addr_t orig_size = size;
db7b5426 2309 void *subpage;
33417e70 2310
da260249
FB
2311#ifdef USE_KQEMU
2312 /* XXX: should not depend on cpu context */
2313 env = first_cpu;
2314 if (env->kqemu_enabled) {
2315 kqemu_set_phys_mem(start_addr, size, phys_offset);
2316 }
2317#endif
7ba1e619
AL
2318 if (kvm_enabled())
2319 kvm_set_phys_mem(start_addr, size, phys_offset);
2320
67c4d23c
PB
2321 if (phys_offset == IO_MEM_UNASSIGNED) {
2322 region_offset = start_addr;
2323 }
8da3ff18 2324 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2325 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2326 end_addr = start_addr + (target_phys_addr_t)size;
2327 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2328 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2329 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2330 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2331 target_phys_addr_t start_addr2, end_addr2;
2332 int need_subpage = 0;
2333
2334 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2335 need_subpage);
4254fab8 2336 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2337 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2338 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2339 &p->phys_offset, orig_memory,
2340 p->region_offset);
db7b5426
BS
2341 } else {
2342 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2343 >> IO_MEM_SHIFT];
2344 }
8da3ff18
PB
2345 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2346 region_offset);
2347 p->region_offset = 0;
db7b5426
BS
2348 } else {
2349 p->phys_offset = phys_offset;
2350 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2351 (phys_offset & IO_MEM_ROMD))
2352 phys_offset += TARGET_PAGE_SIZE;
2353 }
2354 } else {
2355 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356 p->phys_offset = phys_offset;
8da3ff18 2357 p->region_offset = region_offset;
db7b5426 2358 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2359 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2360 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2361 } else {
db7b5426
BS
2362 target_phys_addr_t start_addr2, end_addr2;
2363 int need_subpage = 0;
2364
2365 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2366 end_addr2, need_subpage);
2367
4254fab8 2368 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2369 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2370 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2371 addr & TARGET_PAGE_MASK);
db7b5426 2372 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2373 phys_offset, region_offset);
2374 p->region_offset = 0;
db7b5426
BS
2375 }
2376 }
2377 }
8da3ff18 2378 region_offset += TARGET_PAGE_SIZE;
33417e70 2379 }
3b46e624 2380
9d42037b
FB
2381 /* since each CPU stores ram addresses in its TLB cache, we must
2382 reset the modified entries */
2383 /* XXX: slow ! */
2384 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2385 tlb_flush(env, 1);
2386 }
33417e70
FB
2387}
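/* Illustrative sketch: mapping the same I/O region at two physical
   addresses.  'mmio_index' stands for a value returned by
   cpu_register_io_memory() and the addresses are assumptions.  Because
   handlers see the page offset plus region_offset, both aliases decode
   identical register offsets starting at 0. */
static void hypothetical_map_mmio_alias(ram_addr_t mmio_index)
{
    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE,
                                        mmio_index, 0);
    cpu_register_physical_memory_offset(0xfff00000, TARGET_PAGE_SIZE,
                                        mmio_index, 0);
}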
2388
ba863458 2389/* XXX: temporary until new memory mapping API */
00f82b8a 2390ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2391{
2392 PhysPageDesc *p;
2393
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 if (!p)
2396 return IO_MEM_UNASSIGNED;
2397 return p->phys_offset;
2398}
2399
f65ed4c1
AL
2400void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401{
2402 if (kvm_enabled())
2403 kvm_coalesce_mmio_region(addr, size);
2404}
2405
2406void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407{
2408 if (kvm_enabled())
2409 kvm_uncoalesce_mmio_region(addr, size);
2410}
2411
e9a1ab19 2412/* XXX: better than nothing */
00f82b8a 2413ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2414{
2415 ram_addr_t addr;
7fb4fdcf 2416 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2417 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2418 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2419 abort();
2420 }
2421 addr = phys_ram_alloc_offset;
2422 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2423 return addr;
2424}
2425
2426void qemu_ram_free(ram_addr_t addr)
2427{
2428}
2429
dc828ca1
PB
2430/* Return a host pointer to ram allocated with qemu_ram_alloc.
2431 This may only be used if you actually allocated the ram, and
2432   already know how big the ram block is. */
2433void *qemu_get_ram_ptr(ram_addr_t addr)
2434{
2435 return phys_ram_base + addr;
2436}
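/* Illustrative sketch: the usual way board code wires up a bank of
   guest RAM; the hypothetical_ name and the base address 0 are
   assumptions. */
static void hypothetical_init_ram(ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size);

    /* Make it guest-visible at physical address 0... */
    cpu_register_physical_memory_offset(0, size, offset | IO_MEM_RAM, 0);
    /* ...and clear it through the host mapping. */
    memset(qemu_get_ram_ptr(offset), 0, size);
}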
2437
a4193c8a 2438static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2439{
67d3b957 2440#ifdef DEBUG_UNASSIGNED
ab3d1727 2441 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2442#endif
0a6f8a6d 2443#if defined(TARGET_SPARC)
e18231a3
BS
2444 do_unassigned_access(addr, 0, 0, 0, 1);
2445#endif
2446 return 0;
2447}
2448
2449static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2450{
2451#ifdef DEBUG_UNASSIGNED
2452 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2453#endif
0a6f8a6d 2454#if defined(TARGET_SPARC)
e18231a3
BS
2455 do_unassigned_access(addr, 0, 0, 0, 2);
2456#endif
2457 return 0;
2458}
2459
2460static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2461{
2462#ifdef DEBUG_UNASSIGNED
2463 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2464#endif
0a6f8a6d 2465#if defined(TARGET_SPARC)
e18231a3 2466 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2467#endif
33417e70
FB
2468 return 0;
2469}
2470
a4193c8a 2471static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2472{
67d3b957 2473#ifdef DEBUG_UNASSIGNED
ab3d1727 2474 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2475#endif
0a6f8a6d 2476#if defined(TARGET_SPARC)
e18231a3
BS
2477 do_unassigned_access(addr, 1, 0, 0, 1);
2478#endif
2479}
2480
2481static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2482{
2483#ifdef DEBUG_UNASSIGNED
2484 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2485#endif
0a6f8a6d 2486#if defined(TARGET_SPARC)
e18231a3
BS
2487 do_unassigned_access(addr, 1, 0, 0, 2);
2488#endif
2489}
2490
2491static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2492{
2493#ifdef DEBUG_UNASSIGNED
2494 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2495#endif
0a6f8a6d 2496#if defined(TARGET_SPARC)
e18231a3 2497 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2498#endif
33417e70
FB
2499}
2500
2501static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2502 unassigned_mem_readb,
e18231a3
BS
2503 unassigned_mem_readw,
2504 unassigned_mem_readl,
33417e70
FB
2505};
2506
2507static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2508 unassigned_mem_writeb,
e18231a3
BS
2509 unassigned_mem_writew,
2510 unassigned_mem_writel,
33417e70
FB
2511};
2512
0f459d16
PB
2513static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2514 uint32_t val)
9fa3e853 2515{
3a7d929e 2516 int dirty_flags;
3a7d929e
FB
2517 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2518 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2519#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2520 tb_invalidate_phys_page_fast(ram_addr, 1);
2521 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2522#endif
3a7d929e 2523 }
0f459d16 2524 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2525#ifdef USE_KQEMU
2526 if (cpu_single_env->kqemu_enabled &&
2527 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2528 kqemu_modify_page(cpu_single_env, ram_addr);
2529#endif
f23db169
FB
2530 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2531 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2532 /* we remove the notdirty callback only if the code has been
2533 flushed */
2534 if (dirty_flags == 0xff)
2e70f6ef 2535 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2536}
2537
0f459d16
PB
2538static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2539 uint32_t val)
9fa3e853 2540{
3a7d929e 2541 int dirty_flags;
3a7d929e
FB
2542 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2543 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2544#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2545 tb_invalidate_phys_page_fast(ram_addr, 2);
2546 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2547#endif
3a7d929e 2548 }
0f459d16 2549 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2550#ifdef USE_KQEMU
2551 if (cpu_single_env->kqemu_enabled &&
2552 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2553 kqemu_modify_page(cpu_single_env, ram_addr);
2554#endif
f23db169
FB
2555 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2556 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2557 /* we remove the notdirty callback only if the code has been
2558 flushed */
2559 if (dirty_flags == 0xff)
2e70f6ef 2560 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2561}
2562
0f459d16
PB
2563static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2564 uint32_t val)
9fa3e853 2565{
3a7d929e 2566 int dirty_flags;
3a7d929e
FB
2567 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2568 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2569#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2570 tb_invalidate_phys_page_fast(ram_addr, 4);
2571 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2572#endif
3a7d929e 2573 }
0f459d16 2574 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2575#ifdef USE_KQEMU
2576 if (cpu_single_env->kqemu_enabled &&
2577 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2578 kqemu_modify_page(cpu_single_env, ram_addr);
2579#endif
f23db169
FB
2580 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2581 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2582 /* we remove the notdirty callback only if the code has been
2583 flushed */
2584 if (dirty_flags == 0xff)
2e70f6ef 2585 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2586}
2587
3a7d929e 2588static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2589 NULL, /* never used */
2590 NULL, /* never used */
2591 NULL, /* never used */
2592};
2593
1ccde1cb
FB
2594static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2595 notdirty_mem_writeb,
2596 notdirty_mem_writew,
2597 notdirty_mem_writel,
2598};
2599
0f459d16 2600/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2601static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2602{
2603 CPUState *env = cpu_single_env;
06d55cc1
AL
2604 target_ulong pc, cs_base;
2605 TranslationBlock *tb;
0f459d16 2606 target_ulong vaddr;
a1d1bb31 2607 CPUWatchpoint *wp;
06d55cc1 2608 int cpu_flags;
0f459d16 2609
06d55cc1
AL
2610 if (env->watchpoint_hit) {
2611 /* We re-entered the check after replacing the TB. Now raise
2612     * the debug interrupt so that it will trigger after the
2613 * current instruction. */
2614 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2615 return;
2616 }
2e70f6ef 2617 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2618 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2619 if ((vaddr == (wp->vaddr & len_mask) ||
2620 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2621 wp->flags |= BP_WATCHPOINT_HIT;
2622 if (!env->watchpoint_hit) {
2623 env->watchpoint_hit = wp;
2624 tb = tb_find_pc(env->mem_io_pc);
2625 if (!tb) {
2626 cpu_abort(env, "check_watchpoint: could not find TB for "
2627 "pc=%p", (void *)env->mem_io_pc);
2628 }
2629 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2630 tb_phys_invalidate(tb, -1);
2631 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2632 env->exception_index = EXCP_DEBUG;
2633 } else {
2634 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2635 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2636 }
2637 cpu_resume_from_signal(env, NULL);
06d55cc1 2638 }
6e140f28
AL
2639 } else {
2640 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2641 }
2642 }
2643}
2644
6658ffb8
PB
2645/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2646 so these check for a hit then pass through to the normal out-of-line
2647 phys routines. */
2648static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2649{
b4051334 2650 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2651 return ldub_phys(addr);
2652}
2653
2654static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2655{
b4051334 2656 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2657 return lduw_phys(addr);
2658}
2659
2660static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2661{
b4051334 2662 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2663 return ldl_phys(addr);
2664}
2665
6658ffb8
PB
2666static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2667 uint32_t val)
2668{
b4051334 2669 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2670 stb_phys(addr, val);
2671}
2672
2673static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2674 uint32_t val)
2675{
b4051334 2676 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2677 stw_phys(addr, val);
2678}
2679
2680static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2681 uint32_t val)
2682{
b4051334 2683 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2684 stl_phys(addr, val);
2685}
2686
2687static CPUReadMemoryFunc *watch_mem_read[3] = {
2688 watch_mem_readb,
2689 watch_mem_readw,
2690 watch_mem_readl,
2691};
2692
2693static CPUWriteMemoryFunc *watch_mem_write[3] = {
2694 watch_mem_writeb,
2695 watch_mem_writew,
2696 watch_mem_writel,
2697};
6658ffb8 2698
db7b5426
BS
2699static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2700 unsigned int len)
2701{
db7b5426
BS
2702 uint32_t ret;
2703 unsigned int idx;
2704
8da3ff18 2705 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2706#if defined(DEBUG_SUBPAGE)
2707 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2708 mmio, len, addr, idx);
2709#endif
8da3ff18
PB
2710 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2711 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2712
2713 return ret;
2714}
2715
2716static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2717 uint32_t value, unsigned int len)
2718{
db7b5426
BS
2719 unsigned int idx;
2720
8da3ff18 2721 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2722#if defined(DEBUG_SUBPAGE)
2723 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2724 mmio, len, addr, idx, value);
2725#endif
8da3ff18
PB
2726 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2727 addr + mmio->region_offset[idx][1][len],
2728 value);
db7b5426
BS
2729}
2730
2731static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2732{
2733#if defined(DEBUG_SUBPAGE)
2734 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2735#endif
2736
2737 return subpage_readlen(opaque, addr, 0);
2738}
2739
2740static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2741 uint32_t value)
2742{
2743#if defined(DEBUG_SUBPAGE)
2744 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2745#endif
2746 subpage_writelen(opaque, addr, value, 0);
2747}
2748
2749static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2750{
2751#if defined(DEBUG_SUBPAGE)
2752 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2753#endif
2754
2755 return subpage_readlen(opaque, addr, 1);
2756}
2757
2758static void subpage_writew (void *opaque, target_phys_addr_t addr,
2759 uint32_t value)
2760{
2761#if defined(DEBUG_SUBPAGE)
2762 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2763#endif
2764 subpage_writelen(opaque, addr, value, 1);
2765}
2766
2767static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2768{
2769#if defined(DEBUG_SUBPAGE)
2770 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2771#endif
2772
2773 return subpage_readlen(opaque, addr, 2);
2774}
2775
2776static void subpage_writel (void *opaque,
2777 target_phys_addr_t addr, uint32_t value)
2778{
2779#if defined(DEBUG_SUBPAGE)
2780 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2781#endif
2782 subpage_writelen(opaque, addr, value, 2);
2783}
2784
2785static CPUReadMemoryFunc *subpage_read[] = {
2786 &subpage_readb,
2787 &subpage_readw,
2788 &subpage_readl,
2789};
2790
2791static CPUWriteMemoryFunc *subpage_write[] = {
2792 &subpage_writeb,
2793 &subpage_writew,
2794 &subpage_writel,
2795};
2796
2797static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2798 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2799{
2800 int idx, eidx;
4254fab8 2801 unsigned int i;
db7b5426
BS
2802
2803 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2804 return -1;
2805 idx = SUBPAGE_IDX(start);
2806 eidx = SUBPAGE_IDX(end);
2807#if defined(DEBUG_SUBPAGE)
2808 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2809 mmio, start, end, idx, eidx, memory);
2810#endif
2811 memory >>= IO_MEM_SHIFT;
2812 for (; idx <= eidx; idx++) {
4254fab8 2813 for (i = 0; i < 4; i++) {
3ee89922
BS
2814 if (io_mem_read[memory][i]) {
2815 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2816 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2817 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2818 }
2819 if (io_mem_write[memory][i]) {
2820 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2821 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2822 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2823 }
4254fab8 2824 }
db7b5426
BS
2825 }
2826
2827 return 0;
2828}
2829
00f82b8a 2830static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2831 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2832{
2833 subpage_t *mmio;
2834 int subpage_memory;
2835
2836 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2837
2838 mmio->base = base;
2839 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2840#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2841 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2842 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2843#endif
1eec614b
AL
2844 *phys = subpage_memory | IO_MEM_SUBPAGE;
2845 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2846 region_offset);
db7b5426
BS
2847
2848 return mmio;
2849}
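/* Illustrative sketch: when two devices share one target page,
   cpu_register_physical_memory_offset() builds a subpage behind the
   scenes.  dev_a/dev_b stand for io indexes from
   cpu_register_io_memory(); the base address is an assumption.  Each
   registration covers less than TARGET_PAGE_SIZE, so CHECK_SUBPAGE sets
   need_subpage and subpage_init()/subpage_register() route each access
   by its offset within the page. */
static void hypothetical_map_shared_page(ram_addr_t dev_a, ram_addr_t dev_b)
{
    cpu_register_physical_memory_offset(0xf0000000, 0x100, dev_a, 0);
    cpu_register_physical_memory_offset(0xf0000100, 0x100, dev_b, 0);
}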
2850
88715657
AL
2851static int get_free_io_mem_idx(void)
2852{
2853 int i;
2854
2855 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2856 if (!io_mem_used[i]) {
2857 io_mem_used[i] = 1;
2858 return i;
2859 }
2860
2861 return -1;
2862}
2863
33417e70
FB
2864static void io_mem_init(void)
2865{
88715657
AL
2866 int i;
2867
3a7d929e 2868 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2869 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2870 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2871 for (i=0; i<5; i++)
2872 io_mem_used[i] = 1;
1ccde1cb 2873
0f459d16 2874 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2875 watch_mem_write, NULL);
1ccde1cb 2876 /* alloc dirty bits array */
0a962c02 2877 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2878 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2879}
2880
2881/* mem_read and mem_write are arrays of functions containing the
2882 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2883 2). Functions can be omitted with a NULL function pointer. The
2884 registered functions may be modified dynamically later.
2885   If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2886   modified. If it is zero, a new io zone is allocated. The return
2887   value can be used with cpu_register_physical_memory(); -1 is
2888   returned on error. */
33417e70
FB
2889int cpu_register_io_memory(int io_index,
2890 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2891 CPUWriteMemoryFunc **mem_write,
2892 void *opaque)
33417e70 2893{
4254fab8 2894 int i, subwidth = 0;
33417e70
FB
2895
2896 if (io_index <= 0) {
88715657
AL
2897 io_index = get_free_io_mem_idx();
2898 if (io_index == -1)
2899 return io_index;
33417e70
FB
2900 } else {
2901 if (io_index >= IO_MEM_NB_ENTRIES)
2902 return -1;
2903 }
b5ff1b31 2904
33417e70 2905 for(i = 0;i < 3; i++) {
4254fab8
BS
2906 if (!mem_read[i] || !mem_write[i])
2907 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2908 io_mem_read[io_index][i] = mem_read[i];
2909 io_mem_write[io_index][i] = mem_write[i];
2910 }
a4193c8a 2911 io_mem_opaque[io_index] = opaque;
4254fab8 2912 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2913}
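/* Illustrative sketch: the standard pattern for registering an MMIO
   device; everything named my_dev_* or hypothetical_ is an assumption,
   not code from this file. */
static uint32_t my_dev_read(void *opaque, target_phys_addr_t addr)
{
    /* 'addr' is the offset from the region start plus region_offset. */
    return 0;
}

static void my_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *my_dev_readfn[3] = {
    my_dev_read, my_dev_read, my_dev_read,    /* byte, word, dword */
};

static CPUWriteMemoryFunc *my_dev_writefn[3] = {
    my_dev_write, my_dev_write, my_dev_write,
};

static void hypothetical_my_dev_init(target_phys_addr_t base, void *opaque)
{
    /* io_index 0 asks for a fresh slot; the return value already has
       IO_MEM_SHIFT applied and can be handed straight to
       cpu_register_physical_memory_offset(). */
    int io = cpu_register_io_memory(0, my_dev_readfn, my_dev_writefn, opaque);

    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}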
61382a50 2914
88715657
AL
2915void cpu_unregister_io_memory(int io_table_address)
2916{
2917 int i;
2918 int io_index = io_table_address >> IO_MEM_SHIFT;
2919
2920 for (i=0;i < 3; i++) {
2921 io_mem_read[io_index][i] = unassigned_mem_read[i];
2922 io_mem_write[io_index][i] = unassigned_mem_write[i];
2923 }
2924 io_mem_opaque[io_index] = NULL;
2925 io_mem_used[io_index] = 0;
2926}
2927
8926b517
FB
2928CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2929{
2930 return io_mem_write[io_index >> IO_MEM_SHIFT];
2931}
2932
2933CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2934{
2935 return io_mem_read[io_index >> IO_MEM_SHIFT];
2936}
2937
e2eef170
PB
2938#endif /* !defined(CONFIG_USER_ONLY) */
2939
13eb76e0
FB
2940/* physical memory access (slow version, mainly for debug) */
2941#if defined(CONFIG_USER_ONLY)
5fafdf24 2942void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2943 int len, int is_write)
2944{
2945 int l, flags;
2946 target_ulong page;
53a5960a 2947 void * p;
13eb76e0
FB
2948
2949 while (len > 0) {
2950 page = addr & TARGET_PAGE_MASK;
2951 l = (page + TARGET_PAGE_SIZE) - addr;
2952 if (l > len)
2953 l = len;
2954 flags = page_get_flags(page);
2955 if (!(flags & PAGE_VALID))
2956 return;
2957 if (is_write) {
2958 if (!(flags & PAGE_WRITE))
2959 return;
579a97f7 2960 /* XXX: this code should not depend on lock_user */
72fb7daa 2961 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2962 /* FIXME - should this return an error rather than just fail? */
2963 return;
72fb7daa
AJ
2964 memcpy(p, buf, l);
2965 unlock_user(p, addr, l);
13eb76e0
FB
2966 } else {
2967 if (!(flags & PAGE_READ))
2968 return;
579a97f7 2969 /* XXX: this code should not depend on lock_user */
72fb7daa 2970 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2971 /* FIXME - should this return an error rather than just fail? */
2972 return;
72fb7daa 2973 memcpy(buf, p, l);
5b257578 2974 unlock_user(p, addr, 0);
13eb76e0
FB
2975 }
2976 len -= l;
2977 buf += l;
2978 addr += l;
2979 }
2980}
8df1cd07 2981
13eb76e0 2982#else
5fafdf24 2983void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2984 int len, int is_write)
2985{
2986 int l, io_index;
2987 uint8_t *ptr;
2988 uint32_t val;
2e12669a
FB
2989 target_phys_addr_t page;
2990 unsigned long pd;
92e873b9 2991 PhysPageDesc *p;
3b46e624 2992
13eb76e0
FB
2993 while (len > 0) {
2994 page = addr & TARGET_PAGE_MASK;
2995 l = (page + TARGET_PAGE_SIZE) - addr;
2996 if (l > len)
2997 l = len;
92e873b9 2998 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2999 if (!p) {
3000 pd = IO_MEM_UNASSIGNED;
3001 } else {
3002 pd = p->phys_offset;
3003 }
3b46e624 3004
13eb76e0 3005 if (is_write) {
3a7d929e 3006 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 3007 target_phys_addr_t addr1 = addr;
13eb76e0 3008 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3009 if (p)
6c2934db 3010 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3011 /* XXX: could force cpu_single_env to NULL to avoid
3012 potential bugs */
6c2934db 3013 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3014 /* 32 bit write access */
c27004ec 3015 val = ldl_p(buf);
6c2934db 3016 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3017 l = 4;
6c2934db 3018 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3019 /* 16 bit write access */
c27004ec 3020 val = lduw_p(buf);
6c2934db 3021 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3022 l = 2;
3023 } else {
1c213d19 3024 /* 8 bit write access */
c27004ec 3025 val = ldub_p(buf);
6c2934db 3026 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3027 l = 1;
3028 }
3029 } else {
b448f2f3
FB
3030 unsigned long addr1;
3031 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3032 /* RAM case */
b448f2f3 3033 ptr = phys_ram_base + addr1;
13eb76e0 3034 memcpy(ptr, buf, l);
3a7d929e
FB
3035 if (!cpu_physical_memory_is_dirty(addr1)) {
3036 /* invalidate code */
3037 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3038 /* set dirty bit */
5fafdf24 3039 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3040 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3041 }
13eb76e0
FB
3042 }
3043 } else {
5fafdf24 3044 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3045 !(pd & IO_MEM_ROMD)) {
6c2934db 3046 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3047 /* I/O case */
3048 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3049 if (p)
6c2934db
AJ
3050 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3051 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3052 /* 32 bit read access */
6c2934db 3053 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3054 stl_p(buf, val);
13eb76e0 3055 l = 4;
6c2934db 3056 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3057 /* 16 bit read access */
6c2934db 3058 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3059 stw_p(buf, val);
13eb76e0
FB
3060 l = 2;
3061 } else {
1c213d19 3062 /* 8 bit read access */
6c2934db 3063 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3064 stb_p(buf, val);
13eb76e0
FB
3065 l = 1;
3066 }
3067 } else {
3068 /* RAM case */
5fafdf24 3069 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3070 (addr & ~TARGET_PAGE_MASK);
3071 memcpy(buf, ptr, l);
3072 }
3073 }
3074 len -= l;
3075 buf += l;
3076 addr += l;
3077 }
3078}
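/* Illustrative sketch: device models normally go through the
   read/write wrappers around cpu_physical_memory_rw(); the
   hypothetical_ name is an assumption. */
static void hypothetical_dma_copy(target_phys_addr_t src,
                                  target_phys_addr_t dst, int len)
{
    uint8_t buf[64];

    while (len > 0) {
        int l = len > (int)sizeof(buf) ? (int)sizeof(buf) : len;

        cpu_physical_memory_read(src, buf, l);
        /* the write side also invalidates any TBs on the target page */
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}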
8df1cd07 3079
d0ecd2aa 3080/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3081void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3082 const uint8_t *buf, int len)
3083{
3084 int l;
3085 uint8_t *ptr;
3086 target_phys_addr_t page;
3087 unsigned long pd;
3088 PhysPageDesc *p;
3b46e624 3089
d0ecd2aa
FB
3090 while (len > 0) {
3091 page = addr & TARGET_PAGE_MASK;
3092 l = (page + TARGET_PAGE_SIZE) - addr;
3093 if (l > len)
3094 l = len;
3095 p = phys_page_find(page >> TARGET_PAGE_BITS);
3096 if (!p) {
3097 pd = IO_MEM_UNASSIGNED;
3098 } else {
3099 pd = p->phys_offset;
3100 }
3b46e624 3101
d0ecd2aa 3102 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3103 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3104 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3105 /* do nothing */
3106 } else {
3107 unsigned long addr1;
3108 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109 /* ROM/RAM case */
3110 ptr = phys_ram_base + addr1;
3111 memcpy(ptr, buf, l);
3112 }
3113 len -= l;
3114 buf += l;
3115 addr += l;
3116 }
3117}
3118
6d16c2f8
AL
3119typedef struct {
3120 void *buffer;
3121 target_phys_addr_t addr;
3122 target_phys_addr_t len;
3123} BounceBuffer;
3124
3125static BounceBuffer bounce;
3126
ba223c29
AL
3127typedef struct MapClient {
3128 void *opaque;
3129 void (*callback)(void *opaque);
3130 LIST_ENTRY(MapClient) link;
3131} MapClient;
3132
3133static LIST_HEAD(map_client_list, MapClient) map_client_list
3134 = LIST_HEAD_INITIALIZER(map_client_list);
3135
3136void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3137{
3138 MapClient *client = qemu_malloc(sizeof(*client));
3139
3140 client->opaque = opaque;
3141 client->callback = callback;
3142 LIST_INSERT_HEAD(&map_client_list, client, link);
3143 return client;
3144}
3145
3146void cpu_unregister_map_client(void *_client)
3147{
3148 MapClient *client = (MapClient *)_client;
3149
3150 LIST_REMOVE(client, link);
3151}
3152
3153static void cpu_notify_map_clients(void)
3154{
3155 MapClient *client;
3156
3157 while (!LIST_EMPTY(&map_client_list)) {
3158 client = LIST_FIRST(&map_client_list);
3159 client->callback(client->opaque);
3160 LIST_REMOVE(client, link);
3161 }
3162}
3163
6d16c2f8
AL
3164/* Map a physical memory region into a host virtual address.
3165 * May map a subset of the requested range, given by and returned in *plen.
3166 * May return NULL if resources needed to perform the mapping are exhausted.
3167 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3168 * Use cpu_register_map_client() to know when retrying the map operation is
3169 * likely to succeed.
6d16c2f8
AL
3170 */
3171void *cpu_physical_memory_map(target_phys_addr_t addr,
3172 target_phys_addr_t *plen,
3173 int is_write)
3174{
3175 target_phys_addr_t len = *plen;
3176 target_phys_addr_t done = 0;
3177 int l;
3178 uint8_t *ret = NULL;
3179 uint8_t *ptr;
3180 target_phys_addr_t page;
3181 unsigned long pd;
3182 PhysPageDesc *p;
3183 unsigned long addr1;
3184
3185 while (len > 0) {
3186 page = addr & TARGET_PAGE_MASK;
3187 l = (page + TARGET_PAGE_SIZE) - addr;
3188 if (l > len)
3189 l = len;
3190 p = phys_page_find(page >> TARGET_PAGE_BITS);
3191 if (!p) {
3192 pd = IO_MEM_UNASSIGNED;
3193 } else {
3194 pd = p->phys_offset;
3195 }
3196
3197 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3198 if (done || bounce.buffer) {
3199 break;
3200 }
3201 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3202 bounce.addr = addr;
3203 bounce.len = l;
3204 if (!is_write) {
3205 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3206 }
3207 ptr = bounce.buffer;
3208 } else {
3209 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3210 ptr = phys_ram_base + addr1;
3211 }
3212 if (!done) {
3213 ret = ptr;
3214 } else if (ret + done != ptr) {
3215 break;
3216 }
3217
3218 len -= l;
3219 addr += l;
3220 done += l;
3221 }
3222 *plen = done;
3223 return ret;
3224}
3225
3226/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3227 * Will also mark the memory as dirty if is_write == 1. access_len gives
3228 * the amount of memory that was actually read or written by the caller.
3229 */
3230void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3231 int is_write, target_phys_addr_t access_len)
3232{
3233 if (buffer != bounce.buffer) {
3234 if (is_write) {
3235 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3236 while (access_len) {
3237 unsigned l;
3238 l = TARGET_PAGE_SIZE;
3239 if (l > access_len)
3240 l = access_len;
3241 if (!cpu_physical_memory_is_dirty(addr1)) {
3242 /* invalidate code */
3243 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3244 /* set dirty bit */
3245 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3246 (0xff & ~CODE_DIRTY_FLAG);
3247 }
3248 addr1 += l;
3249 access_len -= l;
3250 }
3251 }
3252 return;
3253 }
3254 if (is_write) {
3255 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3256 }
3257 qemu_free(bounce.buffer);
3258 bounce.buffer = NULL;
ba223c29 3259 cpu_notify_map_clients();
6d16c2f8 3260}
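/* Illustrative sketch: zero-copy DMA with the map/unmap pair, retrying
   via the map-client callback when the single bounce buffer is busy.
   retry_cb and the hypothetical_ name are assumptions. */
static void hypothetical_dma_write(target_phys_addr_t addr,
                                   const uint8_t *data,
                                   target_phys_addr_t len,
                                   void (*retry_cb)(void *), void *opaque)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host) {
        /* Bounce buffer in use: ask to be notified, then retry. */
        cpu_register_map_client(opaque, retry_cb);
        return;
    }
    memcpy(host, data, plen);    /* plen may be less than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}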
d0ecd2aa 3261
8df1cd07
FB
3262/* warning: addr must be aligned */
3263uint32_t ldl_phys(target_phys_addr_t addr)
3264{
3265 int io_index;
3266 uint8_t *ptr;
3267 uint32_t val;
3268 unsigned long pd;
3269 PhysPageDesc *p;
3270
3271 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3272 if (!p) {
3273 pd = IO_MEM_UNASSIGNED;
3274 } else {
3275 pd = p->phys_offset;
3276 }
3b46e624 3277
5fafdf24 3278 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3279 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3280 /* I/O case */
3281 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3282 if (p)
3283 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3284 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3285 } else {
3286 /* RAM case */
5fafdf24 3287 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3288 (addr & ~TARGET_PAGE_MASK);
3289 val = ldl_p(ptr);
3290 }
3291 return val;
3292}
3293
84b7b8e7
FB
3294/* warning: addr must be aligned */
3295uint64_t ldq_phys(target_phys_addr_t addr)
3296{
3297 int io_index;
3298 uint8_t *ptr;
3299 uint64_t val;
3300 unsigned long pd;
3301 PhysPageDesc *p;
3302
3303 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3304 if (!p) {
3305 pd = IO_MEM_UNASSIGNED;
3306 } else {
3307 pd = p->phys_offset;
3308 }
3b46e624 3309
2a4188a3
FB
3310 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3311 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3312 /* I/O case */
3313 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3314 if (p)
3315 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3316#ifdef TARGET_WORDS_BIGENDIAN
3317 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3318 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3319#else
3320 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3321 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3322#endif
3323 } else {
3324 /* RAM case */
5fafdf24 3325 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3326 (addr & ~TARGET_PAGE_MASK);
3327 val = ldq_p(ptr);
3328 }
3329 return val;
3330}
3331
aab33094
FB
3332/* XXX: optimize */
3333uint32_t ldub_phys(target_phys_addr_t addr)
3334{
3335 uint8_t val;
3336 cpu_physical_memory_read(addr, &val, 1);
3337 return val;
3338}
3339
3340/* XXX: optimize */
3341uint32_t lduw_phys(target_phys_addr_t addr)
3342{
3343 uint16_t val;
3344 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3345 return tswap16(val);
3346}
3347
8df1cd07
FB
3348/* warning: addr must be aligned. The ram page is not masked as dirty
3349 and the code inside is not invalidated. It is useful if the dirty
3350 bits are used to track modified PTEs */
3351void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3352{
3353 int io_index;
3354 uint8_t *ptr;
3355 unsigned long pd;
3356 PhysPageDesc *p;
3357
3358 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3359 if (!p) {
3360 pd = IO_MEM_UNASSIGNED;
3361 } else {
3362 pd = p->phys_offset;
3363 }
3b46e624 3364
3a7d929e 3365 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3366 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3367 if (p)
3368 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3369 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3370 } else {
74576198
AL
3371 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3372 ptr = phys_ram_base + addr1;
8df1cd07 3373 stl_p(ptr, val);
74576198
AL
3374
3375 if (unlikely(in_migration)) {
3376 if (!cpu_physical_memory_is_dirty(addr1)) {
3377 /* invalidate code */
3378 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3379 /* set dirty bit */
3380 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3381 (0xff & ~CODE_DIRTY_FLAG);
3382 }
3383 }
8df1cd07
FB
3384 }
3385}
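/* Illustrative sketch: a target MMU walking a hypothetical two-level
   page table with ldl_phys(), using stl_phys_notdirty() to set an
   accessed bit without flagging the page as modified code.  The table
   layout and the 0x20 bit are assumptions. */
static uint32_t hypothetical_walk_pte(target_phys_addr_t table_base,
                                      target_ulong vaddr)
{
    target_phys_addr_t pde_addr, pte_addr;
    uint32_t pde, pte;

    pde_addr = table_base + ((vaddr >> 22) & 0x3ff) * 4;
    pde = ldl_phys(pde_addr);
    pte_addr = (pde & TARGET_PAGE_MASK) + ((vaddr >> 12) & 0x3ff) * 4;
    pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
    return pte;
}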
3386
bc98a7ef
JM
3387void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3388{
3389 int io_index;
3390 uint8_t *ptr;
3391 unsigned long pd;
3392 PhysPageDesc *p;
3393
3394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3395 if (!p) {
3396 pd = IO_MEM_UNASSIGNED;
3397 } else {
3398 pd = p->phys_offset;
3399 }
3b46e624 3400
bc98a7ef
JM
3401 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3402 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3403 if (p)
3404 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3405#ifdef TARGET_WORDS_BIGENDIAN
3406 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3407 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3408#else
3409 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3410 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3411#endif
3412 } else {
5fafdf24 3413 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3414 (addr & ~TARGET_PAGE_MASK);
3415 stq_p(ptr, val);
3416 }
3417}
3418
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

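/* Usage sketch (hypothetical device, not from this file): a DMA engine
   writing a 32-bit completion status into a guest-visible descriptor.
   Unlike the _notdirty variant above, stl_phys invalidates any
   translated code on the page and sets the dirty bits, so migration
   and display code observe the change. */
static void dma_write_status(target_phys_addr_t desc_addr, uint32_t status)
{
    stl_phys(desc_addr, status);
}
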
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

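/* Illustration (hypothetical helper, not from this file): the
   unoptimized helpers above all funnel through the generic byte-wise
   cpu_physical_memory_write() path; tswap16()/tswap64() convert host
   to target byte order first.  On a little-endian host emulating a
   big-endian target, tswap16(0x1234) == 0x3412, so guest memory
   receives the bytes 0x12 0x34, as the guest expects; when host and
   target endianness match, the swap is the identity. */
static void write_desc_len(target_phys_addr_t desc_addr, uint32_t len)
{
    stw_phys(desc_addr, len);
}
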
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

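/* Usage sketch (hypothetical helper, not from this file): reading a
   32-bit word of guest *virtual* memory the way a debugger front end
   such as the gdb stub does.  Virtual-to-physical translation and
   page-crossing are handled inside cpu_memory_rw_debug; note the
   bytes arrive exactly as they sit in guest memory (no tswap). */
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *val)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)val, 4, 0);
}
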
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

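/* Worked example of the icount arithmetic above (numbers invented for
   illustration): take tb->icount = 10 and icount_decr.u16.low = 7 at
   the trap, so n = 7 + 10 = 17.  cpu_restore_state() rewinds the
   counter to the start of the faulting insn; if 3 insns had completed,
   the counter becomes 7 + 10 - 3 = 14 and n = 17 - 14 = 3.  After n++,
   the block is retranslated with a count of 4, so the I/O insn is its
   last instruction, which CF_LAST_IO then permits to perform the
   access for real. */
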
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

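/* Usage sketch (hookup not part of this file): the statistics can be
   dumped through any fprintf-compatible callback; plain fprintf already
   has the int (*)(FILE *, const char *, ...) type the parameter
   expects.  In QEMU this output is normally reached via the monitor's
   "info jit" command. */
static void dump_exec_info_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}
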
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
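
/* The repeated includes above are a template-by-preprocessor idiom:
   softmmu_template.h expands once per SHIFT into access helpers for
   1 << SHIFT byte accesses (1, 2, 4 and 8 bytes), here specialized by
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu for instruction fetches
   through the softmmu TLB.  Below is a self-contained miniature of the
   same idiom (hypothetical names, not QEMU's actual template). */
#define EXAMPLE_GLUE_(a, b) a##b
#define EXAMPLE_GLUE(a, b)  EXAMPLE_GLUE_(a, b)

#define SHIFT 1
static unsigned EXAMPLE_GLUE(example_access_size_, SHIFT)(void)
{
    return 1u << SHIFT;   /* 2-byte variant: example_access_size_1() == 2 */
}
#undef SHIFT

#define SHIFT 3
static unsigned EXAMPLE_GLUE(example_access_size_, SHIFT)(void)
{
    return 1u << SHIFT;   /* 8-byte variant: example_access_size_3() == 8 */
}
#undef SHIFT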