[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
54936004 19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c 23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c 26#include <sys/mman.h>
27#endif
54936004 28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181 36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
53a5960a 43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
fd6ce8f6 51
52/* make various TB consistency checks */
5fafdf24 53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
99773bd4 59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
9fa3e853 64#define SMC_BITMAP_USE_THRESHOLD 10
65
108c49b8 66#if defined(TARGET_SPARC64)
67#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91 68#elif defined(TARGET_SPARC)
69#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea 70#elif defined(TARGET_ALPHA)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8 73#elif defined(TARGET_PPC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a 75#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76#define TARGET_PHYS_ADDR_SPACE_BITS 42
77#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8 79#else
80/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81#define TARGET_PHYS_ADDR_SPACE_BITS 32
82#endif
83
bdaf78e0 84static TranslationBlock *tbs;
26a5f13b 85int code_gen_max_blocks;
9fa3e853 86TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 87static int nb_tbs;
eb51d102 88/* any access to the tbs or the page table must use this lock */
89spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 90
141ac468 91#if defined(__arm__) || defined(__sparc_v9__)
92/* The prologue must be reachable with a direct jump. ARM and Sparc64
93 have limited branch ranges (possibly also PPC) so place it in a
d03d860b 94 section close to code segment. */
95#define code_gen_section \
96 __attribute__((__section__(".gen_code"))) \
97 __attribute__((aligned (32)))
98#else
99#define code_gen_section \
100 __attribute__((aligned (32)))
101#endif
102
103uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0 104static uint8_t *code_gen_buffer;
105static unsigned long code_gen_buffer_size;
26a5f13b 106/* threshold to flush the translated code buffer */
bdaf78e0 107static unsigned long code_gen_buffer_max_size;
fd6ce8f6 108uint8_t *code_gen_ptr;
109
e2eef170 110#if !defined(CONFIG_USER_ONLY)
00f82b8a 111ram_addr_t phys_ram_size;
9fa3e853 112int phys_ram_fd;
113uint8_t *phys_ram_base;
1ccde1cb 114uint8_t *phys_ram_dirty;
74576198 115static int in_migration;
e9a1ab19 116static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 117#endif
9fa3e853 118
6a00d601 119CPUState *first_cpu;
120/* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
5fafdf24 122CPUState *cpu_single_env;
2e70f6ef 123/* 0 = Do not count executed instructions.
bf20dc07 124 1 = Precise instruction counting.
2e70f6ef 125 2 = Adaptive rate instruction counting. */
126int use_icount = 0;
127/* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129int64_t qemu_icount;
6a00d601 130
54936004 131typedef struct PageDesc {
92e873b9 132 /* list of TBs intersecting this ram page */
fd6ce8f6 133 TranslationBlock *first_tb;
9fa3e853 134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138#if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140#endif
54936004 141} PageDesc;
142
92e873b9 143typedef struct PhysPageDesc {
0f459d16 144 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 145 ram_addr_t phys_offset;
8da3ff18 146 ram_addr_t region_offset;
92e873b9 147} PhysPageDesc;
148
54936004 149#define L2_BITS 10
bedb69ea 150#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
151/* XXX: this is a temporary hack for alpha target.
152 * In the future, this is to be replaced by a multi-level table
153 * to actually be able to handle the complete 64 bits address space.
154 */
155#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
156#else
03875444 157#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 158#endif
54936004 159
160#define L1_SIZE (1 << L1_BITS)
161#define L2_SIZE (1 << L2_BITS)
162
83fb7adf 163unsigned long qemu_real_host_page_size;
164unsigned long qemu_host_page_bits;
165unsigned long qemu_host_page_size;
166unsigned long qemu_host_page_mask;
54936004 167
92e873b9 168/* XXX: for system emulation, it could just be an array */
54936004 169static PageDesc *l1_map[L1_SIZE];
bdaf78e0 170static PhysPageDesc **l1_phys_map;
54936004 171
e2eef170 172#if !defined(CONFIG_USER_ONLY)
173static void io_mem_init(void);
174
33417e70 175/* io memory support */
33417e70 176CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
177CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 178void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 179static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8 180static int io_mem_watch;
181#endif
33417e70 182
34865134 183/* log support */
d9b630fd 184static const char *logfilename = "/tmp/qemu.log";
34865134 185FILE *logfile;
186int loglevel;
e735b91c 187static int log_append = 0;
34865134 188
e3db7226 189/* statistics */
190static int tlb_flush_count;
191static int tb_flush_count;
192static int tb_phys_invalidate_count;
193
db7b5426 194#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
195typedef struct subpage_t {
196 target_phys_addr_t base;
3ee89922 197 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
198 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
199 void *opaque[TARGET_PAGE_SIZE][2][4];
8da3ff18 200 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
db7b5426 201} subpage_t;
202
7cb69cae 203#ifdef _WIN32
204static void map_exec(void *addr, long size)
205{
206 DWORD old_protect;
207 VirtualProtect(addr, size,
208 PAGE_EXECUTE_READWRITE, &old_protect);
209
210}
211#else
212static void map_exec(void *addr, long size)
213{
4369415f 214 unsigned long start, end, page_size;
7cb69cae 215
4369415f 216 page_size = getpagesize();
7cb69cae 217 start = (unsigned long)addr;
4369415f 218 start &= ~(page_size - 1);
7cb69cae 219
220 end = (unsigned long)addr + size;
4369415f 221 end += page_size - 1;
222 end &= ~(page_size - 1);
7cb69cae 223
224 mprotect((void *)start, end - start,
225 PROT_READ | PROT_WRITE | PROT_EXEC);
226}
227#endif
228
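/* Note on map_exec() above: the POSIX variant rounds the requested range out
   to host page boundaries before calling mprotect(), since PROT_EXEC can only
   be granted per page; the Win32 variant relies on VirtualProtect() to do the
   equivalent. A minimal usage sketch (hypothetical buffer name):
       static uint8_t my_buf[4096];
       map_exec(my_buf, sizeof(my_buf));
*/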
b346ff46 229static void page_init(void)
54936004 230{
83fb7adf 231 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 232 TARGET_PAGE_SIZE */
c2b48b69 233#ifdef _WIN32
234 {
235 SYSTEM_INFO system_info;
236
237 GetSystemInfo(&system_info);
238 qemu_real_host_page_size = system_info.dwPageSize;
239 }
240#else
241 qemu_real_host_page_size = getpagesize();
242#endif
83fb7adf 243 if (qemu_host_page_size == 0)
244 qemu_host_page_size = qemu_real_host_page_size;
245 if (qemu_host_page_size < TARGET_PAGE_SIZE)
246 qemu_host_page_size = TARGET_PAGE_SIZE;
247 qemu_host_page_bits = 0;
248 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
249 qemu_host_page_bits++;
250 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8 251 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
252 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b 253
254#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
255 {
256 long long startaddr, endaddr;
257 FILE *f;
258 int n;
259
c8a706fe 260 mmap_lock();
0776590d 261 last_brk = (unsigned long)sbrk(0);
50a9569b 262 f = fopen("/proc/self/maps", "r");
263 if (f) {
264 do {
265 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
266 if (n == 2) {
e0b8d65a 267 startaddr = MIN(startaddr,
268 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
269 endaddr = MIN(endaddr,
270 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 271 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b 272 TARGET_PAGE_ALIGN(endaddr),
273 PAGE_RESERVED);
274 }
275 } while (!feof(f));
276 fclose(f);
277 }
c8a706fe 278 mmap_unlock();
50a9569b 279 }
280#endif
54936004 281}
282
434929bf 283static inline PageDesc **page_l1_map(target_ulong index)
54936004 284{
17e2377a 285#if TARGET_LONG_BITS > 32
286 /* Host memory outside guest VM. For 32-bit targets we have already
287 excluded high addresses. */
d8173e0f 288 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a 289 return NULL;
290#endif
434929bf 291 return &l1_map[index >> L2_BITS];
292}
293
294static inline PageDesc *page_find_alloc(target_ulong index)
295{
296 PageDesc **lp, *p;
297 lp = page_l1_map(index);
298 if (!lp)
299 return NULL;
300
54936004 301 p = *lp;
302 if (!p) {
303 /* allocate if not found */
17e2377a 304#if defined(CONFIG_USER_ONLY)
17e2377a 305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 309 *lp = p;
fb1c2cd7 310 if (h2g_valid(p)) {
311 unsigned long addr = h2g(p);
17e2377a 312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
314 PAGE_RESERVED);
315 }
316#else
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318 *lp = p;
319#endif
54936004 320 }
321 return p + (index & (L2_SIZE - 1));
322}
323
00f82b8a 324static inline PageDesc *page_find(target_ulong index)
54936004 325{
434929bf 326 PageDesc **lp, *p;
327 lp = page_l1_map(index);
328 if (!lp)
329 return NULL;
54936004 330
434929bf 331 p = *lp;
54936004 332 if (!p)
333 return 0;
fd6ce8f6 334 return p + (index & (L2_SIZE - 1));
335}
336
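/* For illustration, the two-level lookup in page_find() above decomposes as
   the sketch below (assuming the L2 block has already been allocated):
       PageDesc *l2 = l1_map[index >> L2_BITS];
       PageDesc *pd = l2 ? l2 + (index & (L2_SIZE - 1)) : NULL;
*/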
108c49b8 337static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 338{
108c49b8 339 void **lp, **p;
e3f4e2a4 340 PhysPageDesc *pd;
92e873b9 341
108c49b8 342 p = (void **)l1_phys_map;
343#if TARGET_PHYS_ADDR_SPACE_BITS > 32
344
345#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347#endif
348 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9 349 p = *lp;
350 if (!p) {
351 /* allocate if not found */
108c49b8 352 if (!alloc)
353 return NULL;
354 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355 memset(p, 0, sizeof(void *) * L1_SIZE);
356 *lp = p;
357 }
358#endif
359 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4 360 pd = *lp;
361 if (!pd) {
362 int i;
108c49b8 363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
e3f4e2a4 366 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367 *lp = pd;
67c4d23c 368 for (i = 0; i < L2_SIZE; i++) {
e3f4e2a4 369 pd[i].phys_offset = IO_MEM_UNASSIGNED;
67c4d23c 370 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
371 }
92e873b9 372 }
e3f4e2a4 373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9 374}
375
108c49b8 376static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 377{
108c49b8 378 return phys_page_find_alloc(index, 0);
92e873b9 379}
380
9fa3e853 381#if !defined(CONFIG_USER_ONLY)
6a00d601 382static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 383static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 384 target_ulong vaddr);
c8a706fe 385#define mmap_lock() do { } while(0)
386#define mmap_unlock() do { } while(0)
9fa3e853 387#endif
fd6ce8f6 388
4369415f 389#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390
391#if defined(CONFIG_USER_ONLY)
392/* Currently it is not recommanded to allocate big chunks of data in
393 user mode. It will change when a dedicated libc will be used */
394#define USE_STATIC_CODE_GEN_BUFFER
395#endif
396
397#ifdef USE_STATIC_CODE_GEN_BUFFER
398static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399#endif
400
8fcd3692 401static void code_gen_alloc(unsigned long tb_size)
26a5f13b 402{
4369415f 403#ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407#else
26a5f13b 408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
4369415f 410#if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413#else
26a5f13b 414 /* XXX: needs ajustments */
174a9a1f 415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 416#endif
26a5f13b 417 }
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422#if defined(__linux__)
423 {
424 int flags;
141ac468 425 void *start = NULL;
426
26a5f13b 427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428#if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468 433#elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 439#elif defined(__arm__)
63d41246 440 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e 441 flags |= MAP_FIXED;
442 start = (void *) 0x01000000UL;
443 if (code_gen_buffer_size > 16 * 1024 * 1024)
444 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 445#endif
141ac468 446 code_gen_buffer = mmap(start, code_gen_buffer_size,
447 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b 448 flags, -1, 0);
449 if (code_gen_buffer == MAP_FAILED) {
450 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
451 exit(1);
452 }
453 }
c5e97233 454#elif defined(__FreeBSD__) || defined(__DragonFly__)
06e67a82 455 {
456 int flags;
457 void *addr = NULL;
458 flags = MAP_PRIVATE | MAP_ANONYMOUS;
459#if defined(__x86_64__)
460 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
461 * 0x40000000 is free */
462 flags |= MAP_FIXED;
463 addr = (void *)0x40000000;
464 /* Cannot map more than that */
465 if (code_gen_buffer_size > (800 * 1024 * 1024))
466 code_gen_buffer_size = (800 * 1024 * 1024);
467#endif
468 code_gen_buffer = mmap(addr, code_gen_buffer_size,
469 PROT_WRITE | PROT_READ | PROT_EXEC,
470 flags, -1, 0);
471 if (code_gen_buffer == MAP_FAILED) {
472 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
473 exit(1);
474 }
475 }
26a5f13b 476#else
477 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b 478 map_exec(code_gen_buffer, code_gen_buffer_size);
479#endif
4369415f 480#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 481 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
482 code_gen_buffer_max_size = code_gen_buffer_size -
483 code_gen_max_block_size();
484 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
485 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
486}
487
488/* Must be called before using the QEMU cpus. 'tb_size' is the size
489 (in bytes) allocated to the translation buffer. Zero means default
490 size. */
491void cpu_exec_init_all(unsigned long tb_size)
492{
26a5f13b 493 cpu_gen_init();
494 code_gen_alloc(tb_size);
495 code_gen_ptr = code_gen_buffer;
4369415f 496 page_init();
e2eef170 497#if !defined(CONFIG_USER_ONLY)
26a5f13b 498 io_mem_init();
e2eef170 499#endif
26a5f13b 500}
501
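/* Usage note: with a dynamically allocated buffer, cpu_exec_init_all(0) lets
   code_gen_alloc() pick the default size (DEFAULT_CODE_GEN_BUFFER_SIZE for
   user mode, phys_ram_size / 4 for system emulation), while a non-zero
   tb_size requests that many bytes for the translation buffer. */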
9656f324 502#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
503
504#define CPU_COMMON_SAVE_VERSION 1
505
506static void cpu_common_save(QEMUFile *f, void *opaque)
507{
508 CPUState *env = opaque;
509
510 qemu_put_be32s(f, &env->halted);
511 qemu_put_be32s(f, &env->interrupt_request);
512}
513
514static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
515{
516 CPUState *env = opaque;
517
518 if (version_id != CPU_COMMON_SAVE_VERSION)
519 return -EINVAL;
520
521 qemu_get_be32s(f, &env->halted);
75f482ae 522 qemu_get_be32s(f, &env->interrupt_request);
3098dba0 523 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
524 version_id is increased. */
525 env->interrupt_request &= ~0x01;
9656f324 526 tlb_flush(env, 1);
527
528 return 0;
529}
530#endif
531
6a00d601 532void cpu_exec_init(CPUState *env)
fd6ce8f6 533{
6a00d601 534 CPUState **penv;
535 int cpu_index;
536
c2764719 537#if defined(CONFIG_USER_ONLY)
538 cpu_list_lock();
539#endif
6a00d601 540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
546 }
547 env->cpu_index = cpu_index;
c0ce998e 548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
6a00d601 550 *penv = env;
c2764719 551#if defined(CONFIG_USER_ONLY)
552 cpu_list_unlock();
553#endif
b3c7724c 554#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324 555 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
556 cpu_common_save, cpu_common_load, env);
b3c7724c 557 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
558 cpu_save, cpu_load, env);
559#endif
fd6ce8f6 560}
561
9fa3e853
FB
562static inline void invalidate_page_bitmap(PageDesc *p)
563{
564 if (p->code_bitmap) {
59817ccb 565 qemu_free(p->code_bitmap);
9fa3e853
FB
566 p->code_bitmap = NULL;
567 }
568 p->code_write_count = 0;
569}
570
fd6ce8f6
FB
571/* set to NULL all the 'first_tb' fields in all PageDescs */
572static void page_flush_tb(void)
573{
574 int i, j;
575 PageDesc *p;
576
577 for(i = 0; i < L1_SIZE; i++) {
578 p = l1_map[i];
579 if (p) {
9fa3e853
FB
580 for(j = 0; j < L2_SIZE; j++) {
581 p->first_tb = NULL;
582 invalidate_page_bitmap(p);
583 p++;
584 }
fd6ce8f6
FB
585 }
586 }
587}
588
589/* flush all the translation blocks */
d4e8164f 590/* XXX: tb_flush is currently not thread safe */
6a00d601 591void tb_flush(CPUState *env1)
fd6ce8f6 592{
6a00d601 593 CPUState *env;
0124311e 594#if defined(DEBUG_FLUSH)
ab3d1727
BS
595 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
596 (unsigned long)(code_gen_ptr - code_gen_buffer),
597 nb_tbs, nb_tbs > 0 ?
598 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 599#endif
26a5f13b 600 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
601 cpu_abort(env1, "Internal error: code buffer overflow\n");
602
fd6ce8f6 603 nb_tbs = 0;
3b46e624 604
6a00d601
FB
605 for(env = first_cpu; env != NULL; env = env->next_cpu) {
606 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
607 }
9fa3e853 608
8a8a608f 609 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 610 page_flush_tb();
9fa3e853 611
fd6ce8f6 612 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
613 /* XXX: flush processor icache at this point if cache flush is
614 expensive */
e3db7226 615 tb_flush_count++;
fd6ce8f6 616}
617
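/* Note: tb_flush() drops every translation at once and clears each CPU's
   tb_jmp_cache; it is typically reached when tb_alloc() runs out of TBs or
   buffer space (code_gen_max_blocks / code_gen_buffer_max_size), and hot
   code is simply retranslated afterwards. */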
618#ifdef DEBUG_TB_CHECK
619
bc98a7ef 620static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
621{
622 TranslationBlock *tb;
623 int i;
624 address &= TARGET_PAGE_MASK;
99773bd4
PB
625 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
626 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
627 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
628 address >= tb->pc + tb->size)) {
629 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 630 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
631 }
632 }
633 }
634}
635
636/* verify that all the pages have correct rights for code */
637static void tb_page_check(void)
638{
639 TranslationBlock *tb;
640 int i, flags1, flags2;
3b46e624 641
99773bd4
PB
642 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
643 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
644 flags1 = page_get_flags(tb->pc);
645 flags2 = page_get_flags(tb->pc + tb->size - 1);
646 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
647 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 648 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
649 }
650 }
651 }
652}
653
bdaf78e0 654static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
655{
656 TranslationBlock *tb1;
657 unsigned int n1;
658
659 /* suppress any remaining jumps to this TB */
660 tb1 = tb->jmp_first;
661 for(;;) {
662 n1 = (long)tb1 & 3;
663 tb1 = (TranslationBlock *)((long)tb1 & ~3);
664 if (n1 == 2)
665 break;
666 tb1 = tb1->jmp_next[n1];
667 }
668 /* check end of list */
669 if (tb1 != tb) {
670 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
671 }
672}
673
fd6ce8f6
FB
674#endif
675
676/* invalidate one TB */
677static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
678 int next_offset)
679{
680 TranslationBlock *tb1;
681 for(;;) {
682 tb1 = *ptb;
683 if (tb1 == tb) {
684 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
685 break;
686 }
687 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
688 }
689}
690
9fa3e853
FB
691static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
692{
693 TranslationBlock *tb1;
694 unsigned int n1;
695
696 for(;;) {
697 tb1 = *ptb;
698 n1 = (long)tb1 & 3;
699 tb1 = (TranslationBlock *)((long)tb1 & ~3);
700 if (tb1 == tb) {
701 *ptb = tb1->page_next[n1];
702 break;
703 }
704 ptb = &tb1->page_next[n1];
705 }
706}
707
d4e8164f
FB
708static inline void tb_jmp_remove(TranslationBlock *tb, int n)
709{
710 TranslationBlock *tb1, **ptb;
711 unsigned int n1;
712
713 ptb = &tb->jmp_next[n];
714 tb1 = *ptb;
715 if (tb1) {
716 /* find tb(n) in circular list */
717 for(;;) {
718 tb1 = *ptb;
719 n1 = (long)tb1 & 3;
720 tb1 = (TranslationBlock *)((long)tb1 & ~3);
721 if (n1 == n && tb1 == tb)
722 break;
723 if (n1 == 2) {
724 ptb = &tb1->jmp_first;
725 } else {
726 ptb = &tb1->jmp_next[n1];
727 }
728 }
729 /* now we can suppress tb(n) from the list */
730 *ptb = tb->jmp_next[n];
731
732 tb->jmp_next[n] = NULL;
733 }
734}
735
736/* reset the jump entry 'n' of a TB so that it is not chained to
737 another TB */
738static inline void tb_reset_jump(TranslationBlock *tb, int n)
739{
740 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
741}
742
2e70f6ef 743void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 744{
6a00d601 745 CPUState *env;
8a40a180 746 PageDesc *p;
d4e8164f 747 unsigned int h, n1;
00f82b8a 748 target_phys_addr_t phys_pc;
8a40a180 749 TranslationBlock *tb1, *tb2;
3b46e624 750
8a40a180
FB
751 /* remove the TB from the hash list */
752 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
753 h = tb_phys_hash_func(phys_pc);
5fafdf24 754 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
755 offsetof(TranslationBlock, phys_hash_next));
756
757 /* remove the TB from the page list */
758 if (tb->page_addr[0] != page_addr) {
759 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
762 }
763 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
764 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
765 tb_page_remove(&p->first_tb, tb);
766 invalidate_page_bitmap(p);
767 }
768
36bdbe54 769 tb_invalidated_flag = 1;
59817ccb 770
fd6ce8f6 771 /* remove the TB from the hash list */
8a40a180 772 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
773 for(env = first_cpu; env != NULL; env = env->next_cpu) {
774 if (env->tb_jmp_cache[h] == tb)
775 env->tb_jmp_cache[h] = NULL;
776 }
d4e8164f
FB
777
778 /* suppress this TB from the two jump lists */
779 tb_jmp_remove(tb, 0);
780 tb_jmp_remove(tb, 1);
781
782 /* suppress any remaining jumps to this TB */
783 tb1 = tb->jmp_first;
784 for(;;) {
785 n1 = (long)tb1 & 3;
786 if (n1 == 2)
787 break;
788 tb1 = (TranslationBlock *)((long)tb1 & ~3);
789 tb2 = tb1->jmp_next[n1];
790 tb_reset_jump(tb1, n1);
791 tb1->jmp_next[n1] = NULL;
792 tb1 = tb2;
793 }
794 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 795
e3db7226 796 tb_phys_invalidate_count++;
9fa3e853
FB
797}
798
799static inline void set_bits(uint8_t *tab, int start, int len)
800{
801 int end, mask, end1;
802
803 end = start + len;
804 tab += start >> 3;
805 mask = 0xff << (start & 7);
806 if ((start & ~7) == (end & ~7)) {
807 if (start < end) {
808 mask &= ~(0xff << (end & 7));
809 *tab |= mask;
810 }
811 } else {
812 *tab++ |= mask;
813 start = (start + 8) & ~7;
814 end1 = end & ~7;
815 while (start < end1) {
816 *tab++ = 0xff;
817 start += 8;
818 }
819 if (start < end) {
820 mask = ~(0xff << (end & 7));
821 *tab |= mask;
822 }
823 }
824}
825
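/* Worked example for set_bits() (values chosen for illustration):
       uint8_t bitmap[2] = { 0, 0 };
       set_bits(bitmap, 3, 7);
   marks bits 3..9, leaving bitmap[0] == 0xf8 and bitmap[1] == 0x03. */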
826static void build_page_bitmap(PageDesc *p)
827{
828 int n, tb_start, tb_end;
829 TranslationBlock *tb;
3b46e624 830
b2a7081a 831 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
832
833 tb = p->first_tb;
834 while (tb != NULL) {
835 n = (long)tb & 3;
836 tb = (TranslationBlock *)((long)tb & ~3);
837 /* NOTE: this is subtle as a TB may span two physical pages */
838 if (n == 0) {
839 /* NOTE: tb_end may be after the end of the page, but
840 it is not a problem */
841 tb_start = tb->pc & ~TARGET_PAGE_MASK;
842 tb_end = tb_start + tb->size;
843 if (tb_end > TARGET_PAGE_SIZE)
844 tb_end = TARGET_PAGE_SIZE;
845 } else {
846 tb_start = 0;
847 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
848 }
849 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
850 tb = tb->page_next[n];
851 }
852}
853
2e70f6ef
PB
854TranslationBlock *tb_gen_code(CPUState *env,
855 target_ulong pc, target_ulong cs_base,
856 int flags, int cflags)
d720b93d
FB
857{
858 TranslationBlock *tb;
859 uint8_t *tc_ptr;
860 target_ulong phys_pc, phys_page2, virt_page2;
861 int code_gen_size;
862
c27004ec
FB
863 phys_pc = get_phys_addr_code(env, pc);
864 tb = tb_alloc(pc);
d720b93d
FB
865 if (!tb) {
866 /* flush must be done */
867 tb_flush(env);
868 /* cannot fail at this point */
c27004ec 869 tb = tb_alloc(pc);
2e70f6ef
PB
870 /* Don't forget to invalidate previous TB info. */
871 tb_invalidated_flag = 1;
d720b93d
FB
872 }
873 tc_ptr = code_gen_ptr;
874 tb->tc_ptr = tc_ptr;
875 tb->cs_base = cs_base;
876 tb->flags = flags;
877 tb->cflags = cflags;
d07bde88 878 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 879 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 880
d720b93d 881 /* check next page if needed */
c27004ec 882 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 883 phys_page2 = -1;
c27004ec 884 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
885 phys_page2 = get_phys_addr_code(env, virt_page2);
886 }
887 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 888 return tb;
d720b93d 889}
3b46e624 890
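/* Note: a TB whose guest code crosses a page boundary records both physical
   pages (phys_pc and phys_page2), so a write to either page invalidates it;
   for a single-page TB, phys_page2 stays -1 when passed to tb_link_phys(). */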
9fa3e853
FB
891/* invalidate all TBs which intersect with the target physical page
892 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
893 the same physical page. 'is_cpu_write_access' should be true if called
894 from a real cpu write access: the virtual CPU will exit the current
895 TB if code is modified inside this TB. */
00f82b8a 896void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
897 int is_cpu_write_access)
898{
6b917547 899 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 900 CPUState *env = cpu_single_env;
9fa3e853 901 target_ulong tb_start, tb_end;
6b917547
AL
902 PageDesc *p;
903 int n;
904#ifdef TARGET_HAS_PRECISE_SMC
905 int current_tb_not_found = is_cpu_write_access;
906 TranslationBlock *current_tb = NULL;
907 int current_tb_modified = 0;
908 target_ulong current_pc = 0;
909 target_ulong current_cs_base = 0;
910 int current_flags = 0;
911#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
912
913 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 914 if (!p)
9fa3e853 915 return;
5fafdf24 916 if (!p->code_bitmap &&
d720b93d
FB
917 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
918 is_cpu_write_access) {
9fa3e853
FB
919 /* build code bitmap */
920 build_page_bitmap(p);
921 }
922
923 /* we remove all the TBs in the range [start, end[ */
924 /* XXX: see if in some cases it could be faster to invalidate all the code */
925 tb = p->first_tb;
926 while (tb != NULL) {
927 n = (long)tb & 3;
928 tb = (TranslationBlock *)((long)tb & ~3);
929 tb_next = tb->page_next[n];
930 /* NOTE: this is subtle as a TB may span two physical pages */
931 if (n == 0) {
932 /* NOTE: tb_end may be after the end of the page, but
933 it is not a problem */
934 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
935 tb_end = tb_start + tb->size;
936 } else {
937 tb_start = tb->page_addr[1];
938 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
939 }
940 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
941#ifdef TARGET_HAS_PRECISE_SMC
942 if (current_tb_not_found) {
943 current_tb_not_found = 0;
944 current_tb = NULL;
2e70f6ef 945 if (env->mem_io_pc) {
d720b93d 946 /* now we have a real cpu fault */
2e70f6ef 947 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
948 }
949 }
950 if (current_tb == tb &&
2e70f6ef 951 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
952 /* If we are modifying the current TB, we must stop
953 its execution. We could be more precise by checking
954 that the modification is after the current PC, but it
955 would require a specialized function to partially
956 restore the CPU state */
3b46e624 957
d720b93d 958 current_tb_modified = 1;
5fafdf24 959 cpu_restore_state(current_tb, env,
2e70f6ef 960 env->mem_io_pc, NULL);
6b917547
AL
961 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
962 &current_flags);
d720b93d
FB
963 }
964#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
965 /* we need to do that to handle the case where a signal
966 occurs while doing tb_phys_invalidate() */
967 saved_tb = NULL;
968 if (env) {
969 saved_tb = env->current_tb;
970 env->current_tb = NULL;
971 }
9fa3e853 972 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
973 if (env) {
974 env->current_tb = saved_tb;
975 if (env->interrupt_request && env->current_tb)
976 cpu_interrupt(env, env->interrupt_request);
977 }
9fa3e853
FB
978 }
979 tb = tb_next;
980 }
981#if !defined(CONFIG_USER_ONLY)
982 /* if no code remaining, no need to continue to use slow writes */
983 if (!p->first_tb) {
984 invalidate_page_bitmap(p);
d720b93d 985 if (is_cpu_write_access) {
2e70f6ef 986 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
987 }
988 }
989#endif
990#ifdef TARGET_HAS_PRECISE_SMC
991 if (current_tb_modified) {
992 /* we generate a block containing just the instruction
993 modifying the memory. It will ensure that it cannot modify
994 itself */
ea1c1802 995 env->current_tb = NULL;
2e70f6ef 996 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 997 cpu_resume_from_signal(env, NULL);
9fa3e853 998 }
fd6ce8f6 999#endif
9fa3e853 1000}
fd6ce8f6 1001
9fa3e853 1002/* len must be <= 8 and start must be a multiple of len */
00f82b8a 1003static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
1004{
1005 PageDesc *p;
1006 int offset, b;
59817ccb 1007#if 0
a4193c8a 1008 if (1) {
93fcfe39
AL
1009 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010 cpu_single_env->mem_io_vaddr, len,
1011 cpu_single_env->eip,
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1013 }
1014#endif
9fa3e853 1015 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1016 if (!p)
9fa3e853
FB
1017 return;
1018 if (p->code_bitmap) {
1019 offset = start & ~TARGET_PAGE_MASK;
1020 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1021 if (b & ((1 << len) - 1))
1022 goto do_invalidate;
1023 } else {
1024 do_invalidate:
d720b93d 1025 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1026 }
1027}
1028
9fa3e853 1029#if !defined(CONFIG_SOFTMMU)
00f82b8a 1030static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1031 unsigned long pc, void *puc)
9fa3e853 1032{
6b917547 1033 TranslationBlock *tb;
9fa3e853 1034 PageDesc *p;
6b917547 1035 int n;
d720b93d 1036#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1037 TranslationBlock *current_tb = NULL;
d720b93d 1038 CPUState *env = cpu_single_env;
6b917547
AL
1039 int current_tb_modified = 0;
1040 target_ulong current_pc = 0;
1041 target_ulong current_cs_base = 0;
1042 int current_flags = 0;
d720b93d 1043#endif
9fa3e853
FB
1044
1045 addr &= TARGET_PAGE_MASK;
1046 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1047 if (!p)
9fa3e853
FB
1048 return;
1049 tb = p->first_tb;
d720b93d
FB
1050#ifdef TARGET_HAS_PRECISE_SMC
1051 if (tb && pc != 0) {
1052 current_tb = tb_find_pc(pc);
1053 }
1054#endif
9fa3e853
FB
1055 while (tb != NULL) {
1056 n = (long)tb & 3;
1057 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1058#ifdef TARGET_HAS_PRECISE_SMC
1059 if (current_tb == tb &&
2e70f6ef 1060 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1061 /* If we are modifying the current TB, we must stop
1062 its execution. We could be more precise by checking
1063 that the modification is after the current PC, but it
1064 would require a specialized function to partially
1065 restore the CPU state */
3b46e624 1066
d720b93d
FB
1067 current_tb_modified = 1;
1068 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1069 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1070 &current_flags);
d720b93d
FB
1071 }
1072#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1073 tb_phys_invalidate(tb, addr);
1074 tb = tb->page_next[n];
1075 }
fd6ce8f6 1076 p->first_tb = NULL;
d720b93d
FB
1077#ifdef TARGET_HAS_PRECISE_SMC
1078 if (current_tb_modified) {
1079 /* we generate a block containing just the instruction
1080 modifying the memory. It will ensure that it cannot modify
1081 itself */
ea1c1802 1082 env->current_tb = NULL;
2e70f6ef 1083 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1084 cpu_resume_from_signal(env, puc);
1085 }
1086#endif
fd6ce8f6 1087}
9fa3e853 1088#endif
fd6ce8f6
FB
1089
1090/* add the tb in the target page and protect it if necessary */
5fafdf24 1091static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1092 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1093{
1094 PageDesc *p;
9fa3e853
FB
1095 TranslationBlock *last_first_tb;
1096
1097 tb->page_addr[n] = page_addr;
3a7d929e 1098 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1099 tb->page_next[n] = p->first_tb;
1100 last_first_tb = p->first_tb;
1101 p->first_tb = (TranslationBlock *)((long)tb | n);
1102 invalidate_page_bitmap(p);
fd6ce8f6 1103
107db443 1104#if defined(TARGET_HAS_SMC) || 1
d720b93d 1105
9fa3e853 1106#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1107 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1108 target_ulong addr;
1109 PageDesc *p2;
9fa3e853
FB
1110 int prot;
1111
fd6ce8f6
FB
1112 /* force the host page as non writable (writes will have a
1113 page fault + mprotect overhead) */
53a5960a 1114 page_addr &= qemu_host_page_mask;
fd6ce8f6 1115 prot = 0;
53a5960a
PB
1116 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1117 addr += TARGET_PAGE_SIZE) {
1118
1119 p2 = page_find (addr >> TARGET_PAGE_BITS);
1120 if (!p2)
1121 continue;
1122 prot |= p2->flags;
1123 p2->flags &= ~PAGE_WRITE;
1124 page_get_flags(addr);
1125 }
5fafdf24 1126 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1127 (prot & PAGE_BITS) & ~PAGE_WRITE);
1128#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1129 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1130 page_addr);
fd6ce8f6 1131#endif
fd6ce8f6 1132 }
9fa3e853
FB
1133#else
1134 /* if some code is already present, then the pages are already
1135 protected. So we handle the case where only the first TB is
1136 allocated in a physical page */
1137 if (!last_first_tb) {
6a00d601 1138 tlb_protect_code(page_addr);
9fa3e853
FB
1139 }
1140#endif
d720b93d
FB
1141
1142#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1143}
1144
1145/* Allocate a new translation block. Flush the translation buffer if
1146 too many translation blocks or too much generated code. */
c27004ec 1147TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1148{
1149 TranslationBlock *tb;
fd6ce8f6 1150
26a5f13b
FB
1151 if (nb_tbs >= code_gen_max_blocks ||
1152 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1153 return NULL;
fd6ce8f6
FB
1154 tb = &tbs[nb_tbs++];
1155 tb->pc = pc;
b448f2f3 1156 tb->cflags = 0;
d4e8164f
FB
1157 return tb;
1158}
1159
2e70f6ef
PB
1160void tb_free(TranslationBlock *tb)
1161{
bf20dc07 1162 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1163 Ignore the hard cases and just back up if this TB happens to
1164 be the last one generated. */
1165 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1166 code_gen_ptr = tb->tc_ptr;
1167 nb_tbs--;
1168 }
1169}
1170
9fa3e853
FB
1171/* add a new TB and link it to the physical page tables. phys_page2 is
1172 (-1) to indicate that only one page contains the TB. */
5fafdf24 1173void tb_link_phys(TranslationBlock *tb,
9fa3e853 1174 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1175{
9fa3e853
FB
1176 unsigned int h;
1177 TranslationBlock **ptb;
1178
c8a706fe
PB
1179 /* Grab the mmap lock to stop another thread invalidating this TB
1180 before we are done. */
1181 mmap_lock();
9fa3e853
FB
1182 /* add in the physical hash table */
1183 h = tb_phys_hash_func(phys_pc);
1184 ptb = &tb_phys_hash[h];
1185 tb->phys_hash_next = *ptb;
1186 *ptb = tb;
fd6ce8f6
FB
1187
1188 /* add in the page list */
9fa3e853
FB
1189 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1190 if (phys_page2 != -1)
1191 tb_alloc_page(tb, 1, phys_page2);
1192 else
1193 tb->page_addr[1] = -1;
9fa3e853 1194
d4e8164f
FB
1195 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1196 tb->jmp_next[0] = NULL;
1197 tb->jmp_next[1] = NULL;
1198
1199 /* init original jump addresses */
1200 if (tb->tb_next_offset[0] != 0xffff)
1201 tb_reset_jump(tb, 0);
1202 if (tb->tb_next_offset[1] != 0xffff)
1203 tb_reset_jump(tb, 1);
8a40a180
FB
1204
1205#ifdef DEBUG_TB_CHECK
1206 tb_page_check();
1207#endif
c8a706fe 1208 mmap_unlock();
fd6ce8f6
FB
1209}
1210
9fa3e853
FB
1211/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212 tb[1].tc_ptr. Return NULL if not found */
1213TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1214{
9fa3e853
FB
1215 int m_min, m_max, m;
1216 unsigned long v;
1217 TranslationBlock *tb;
a513fe19
FB
1218
1219 if (nb_tbs <= 0)
1220 return NULL;
1221 if (tc_ptr < (unsigned long)code_gen_buffer ||
1222 tc_ptr >= (unsigned long)code_gen_ptr)
1223 return NULL;
1224 /* binary search (cf Knuth) */
1225 m_min = 0;
1226 m_max = nb_tbs - 1;
1227 while (m_min <= m_max) {
1228 m = (m_min + m_max) >> 1;
1229 tb = &tbs[m];
1230 v = (unsigned long)tb->tc_ptr;
1231 if (v == tc_ptr)
1232 return tb;
1233 else if (tc_ptr < v) {
1234 m_max = m - 1;
1235 } else {
1236 m_min = m + 1;
1237 }
5fafdf24 1238 }
a513fe19 1239 return &tbs[m_max];
1240}
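/* The binary search above works because TBs are carved out of code_gen_buffer
   sequentially (see tb_alloc() and code_gen_ptr), so tbs[] is sorted by
   tc_ptr and a host PC inside generated code maps to the entry with the
   greatest tc_ptr not exceeding it. */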
7501267e 1241
ea041c0e
FB
1242static void tb_reset_jump_recursive(TranslationBlock *tb);
1243
1244static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1245{
1246 TranslationBlock *tb1, *tb_next, **ptb;
1247 unsigned int n1;
1248
1249 tb1 = tb->jmp_next[n];
1250 if (tb1 != NULL) {
1251 /* find head of list */
1252 for(;;) {
1253 n1 = (long)tb1 & 3;
1254 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1255 if (n1 == 2)
1256 break;
1257 tb1 = tb1->jmp_next[n1];
1258 }
1259 /* we are now sure now that tb jumps to tb1 */
1260 tb_next = tb1;
1261
1262 /* remove tb from the jmp_first list */
1263 ptb = &tb_next->jmp_first;
1264 for(;;) {
1265 tb1 = *ptb;
1266 n1 = (long)tb1 & 3;
1267 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268 if (n1 == n && tb1 == tb)
1269 break;
1270 ptb = &tb1->jmp_next[n1];
1271 }
1272 *ptb = tb->jmp_next[n];
1273 tb->jmp_next[n] = NULL;
3b46e624 1274
ea041c0e
FB
1275 /* suppress the jump to next tb in generated code */
1276 tb_reset_jump(tb, n);
1277
0124311e 1278 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1279 tb_reset_jump_recursive(tb_next);
1280 }
1281}
1282
1283static void tb_reset_jump_recursive(TranslationBlock *tb)
1284{
1285 tb_reset_jump_recursive2(tb, 0);
1286 tb_reset_jump_recursive2(tb, 1);
1287}
1288
1fddef4b 1289#if defined(TARGET_HAS_ICE)
d720b93d
FB
1290static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291{
9b3c35e0
JM
1292 target_phys_addr_t addr;
1293 target_ulong pd;
c2f07f81
PB
1294 ram_addr_t ram_addr;
1295 PhysPageDesc *p;
d720b93d 1296
c2f07f81
PB
1297 addr = cpu_get_phys_page_debug(env, pc);
1298 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299 if (!p) {
1300 pd = IO_MEM_UNASSIGNED;
1301 } else {
1302 pd = p->phys_offset;
1303 }
1304 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1305 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1306}
c27004ec 1307#endif
d720b93d 1308
6658ffb8 1309/* Add a watchpoint. */
a1d1bb31
AL
1310int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1312{
b4051334 1313 target_ulong len_mask = ~(len - 1);
c0ce998e 1314 CPUWatchpoint *wp;
6658ffb8 1315
b4051334
AL
1316 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1320 return -EINVAL;
1321 }
a1d1bb31 1322 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1323
1324 wp->vaddr = addr;
b4051334 1325 wp->len_mask = len_mask;
a1d1bb31
AL
1326 wp->flags = flags;
1327
2dc9f411 1328 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1329 if (flags & BP_GDB)
1330 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331 else
1332 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1333
6658ffb8 1334 tlb_flush_page(env, addr);
a1d1bb31
AL
1335
1336 if (watchpoint)
1337 *watchpoint = wp;
1338 return 0;
6658ffb8 1339}
1340
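/* Usage sketch (illustrative address; BP_MEM_WRITE is assumed to come from
   cpu-all.h alongside BP_GDB):
       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0)
           ...   len must be a power of two and addr aligned to it
*/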
a1d1bb31
AL
1341/* Remove a specific watchpoint. */
1342int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1343 int flags)
6658ffb8 1344{
b4051334 1345 target_ulong len_mask = ~(len - 1);
a1d1bb31 1346 CPUWatchpoint *wp;
6658ffb8 1347
c0ce998e 1348 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1349 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1350 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1351 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1352 return 0;
1353 }
1354 }
a1d1bb31 1355 return -ENOENT;
6658ffb8
PB
1356}
1357
a1d1bb31
AL
1358/* Remove a specific watchpoint by reference. */
1359void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1360{
c0ce998e 1361 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1362
a1d1bb31
AL
1363 tlb_flush_page(env, watchpoint->vaddr);
1364
1365 qemu_free(watchpoint);
1366}
1367
1368/* Remove all matching watchpoints. */
1369void cpu_watchpoint_remove_all(CPUState *env, int mask)
1370{
c0ce998e 1371 CPUWatchpoint *wp, *next;
a1d1bb31 1372
c0ce998e 1373 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1374 if (wp->flags & mask)
1375 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1376 }
7d03f82f
EI
1377}
1378
a1d1bb31
AL
1379/* Add a breakpoint. */
1380int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381 CPUBreakpoint **breakpoint)
4c3a88a2 1382{
1fddef4b 1383#if defined(TARGET_HAS_ICE)
c0ce998e 1384 CPUBreakpoint *bp;
3b46e624 1385
a1d1bb31 1386 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1387
a1d1bb31
AL
1388 bp->pc = pc;
1389 bp->flags = flags;
1390
2dc9f411 1391 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1392 if (flags & BP_GDB)
1393 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1394 else
1395 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1396
d720b93d 1397 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1398
1399 if (breakpoint)
1400 *breakpoint = bp;
4c3a88a2
FB
1401 return 0;
1402#else
a1d1bb31 1403 return -ENOSYS;
4c3a88a2
FB
1404#endif
1405}
1406
a1d1bb31
AL
1407/* Remove a specific breakpoint. */
1408int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1409{
7d03f82f 1410#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1411 CPUBreakpoint *bp;
1412
c0ce998e 1413 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1414 if (bp->pc == pc && bp->flags == flags) {
1415 cpu_breakpoint_remove_by_ref(env, bp);
1416 return 0;
1417 }
7d03f82f 1418 }
a1d1bb31
AL
1419 return -ENOENT;
1420#else
1421 return -ENOSYS;
7d03f82f
EI
1422#endif
1423}
1424
a1d1bb31
AL
1425/* Remove a specific breakpoint by reference. */
1426void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1427{
1fddef4b 1428#if defined(TARGET_HAS_ICE)
c0ce998e 1429 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1430
a1d1bb31
AL
1431 breakpoint_invalidate(env, breakpoint->pc);
1432
1433 qemu_free(breakpoint);
1434#endif
1435}
1436
1437/* Remove all matching breakpoints. */
1438void cpu_breakpoint_remove_all(CPUState *env, int mask)
1439{
1440#if defined(TARGET_HAS_ICE)
c0ce998e 1441 CPUBreakpoint *bp, *next;
a1d1bb31 1442
c0ce998e 1443 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1444 if (bp->flags & mask)
1445 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1446 }
4c3a88a2
FB
1447#endif
1448}
1449
c33a346e
FB
1450/* enable or disable single step mode. EXCP_DEBUG is returned by the
1451 CPU loop after each instruction */
1452void cpu_single_step(CPUState *env, int enabled)
1453{
1fddef4b 1454#if defined(TARGET_HAS_ICE)
c33a346e
FB
1455 if (env->singlestep_enabled != enabled) {
1456 env->singlestep_enabled = enabled;
e22a25c9
AL
1457 if (kvm_enabled())
1458 kvm_update_guest_debug(env, 0);
1459 else {
1460 /* must flush all the translated code to avoid inconsistancies */
1461 /* XXX: only flush what is necessary */
1462 tb_flush(env);
1463 }
c33a346e
FB
1464 }
1465#endif
1466}
1467
34865134
FB
1468/* enable or disable low levels log */
1469void cpu_set_log(int log_flags)
1470{
1471 loglevel = log_flags;
1472 if (loglevel && !logfile) {
11fcfab4 1473 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1474 if (!logfile) {
1475 perror(logfilename);
1476 _exit(1);
1477 }
9fa3e853
FB
1478#if !defined(CONFIG_SOFTMMU)
1479 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1480 {
b55266b5 1481 static char logfile_buf[4096];
9fa3e853
FB
1482 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1483 }
1484#else
34865134 1485 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1486#endif
e735b91c
PB
1487 log_append = 1;
1488 }
1489 if (!loglevel && logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
34865134
FB
1492 }
1493}
1494
1495void cpu_set_log_filename(const char *filename)
1496{
1497 logfilename = strdup(filename);
e735b91c
PB
1498 if (logfile) {
1499 fclose(logfile);
1500 logfile = NULL;
1501 }
1502 cpu_set_log(loglevel);
34865134 1503}
c33a346e 1504
3098dba0 1505static void cpu_unlink_tb(CPUState *env)
ea041c0e 1506{
3098dba0
AJ
1507#if defined(USE_NPTL)
1508 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1509 problem and hope the cpu will stop of its own accord. For userspace
1510 emulation this often isn't actually as bad as it sounds. Often
1511 signals are used primarily to interrupt blocking syscalls. */
1512#else
ea041c0e 1513 TranslationBlock *tb;
15a51156 1514 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1515
3098dba0
AJ
1516 tb = env->current_tb;
1517 /* if the cpu is currently executing code, we must unlink it and
1518 all the potentially executing TB */
1519 if (tb && !testandset(&interrupt_lock)) {
1520 env->current_tb = NULL;
1521 tb_reset_jump_recursive(tb);
1522 resetlock(&interrupt_lock);
be214e6c 1523 }
3098dba0
AJ
1524#endif
1525}
1526
1527/* mask must never be zero, except for A20 change call */
1528void cpu_interrupt(CPUState *env, int mask)
1529{
1530 int old_mask;
be214e6c 1531
2e70f6ef 1532 old_mask = env->interrupt_request;
68a79315 1533 env->interrupt_request |= mask;
3098dba0 1534
2e70f6ef 1535 if (use_icount) {
266910c4 1536 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1537#ifndef CONFIG_USER_ONLY
2e70f6ef 1538 if (!can_do_io(env)
be214e6c 1539 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1540 cpu_abort(env, "Raised interrupt while not in I/O function");
1541 }
1542#endif
1543 } else {
3098dba0 1544 cpu_unlink_tb(env);
ea041c0e 1545 }
1546}
1547
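/* Note: with icount enabled, setting icount_decr.u16.high to 0xffff makes the
   next instruction-count check in generated code go negative, so the CPU
   leaves the TB at that check instead of having its TB chain unlinked. */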
b54ad049
FB
1548void cpu_reset_interrupt(CPUState *env, int mask)
1549{
1550 env->interrupt_request &= ~mask;
1551}
1552
3098dba0
AJ
1553void cpu_exit(CPUState *env)
1554{
1555 env->exit_request = 1;
1556 cpu_unlink_tb(env);
1557}
1558
c7cd6a37 1559const CPULogItem cpu_log_items[] = {
5fafdf24 1560 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1561 "show generated host assembly code for each compiled TB" },
1562 { CPU_LOG_TB_IN_ASM, "in_asm",
1563 "show target assembly code for each compiled TB" },
5fafdf24 1564 { CPU_LOG_TB_OP, "op",
57fec1fe 1565 "show micro ops for each compiled TB" },
f193c797 1566 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1567 "show micro ops "
1568#ifdef TARGET_I386
1569 "before eflags optimization and "
f193c797 1570#endif
e01a1157 1571 "after liveness analysis" },
f193c797
FB
1572 { CPU_LOG_INT, "int",
1573 "show interrupts/exceptions in short format" },
1574 { CPU_LOG_EXEC, "exec",
1575 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1576 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1577 "show CPU state before block translation" },
f193c797
FB
1578#ifdef TARGET_I386
1579 { CPU_LOG_PCALL, "pcall",
1580 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1581 { CPU_LOG_RESET, "cpu_reset",
1582 "show CPU state before CPU resets" },
f193c797 1583#endif
8e3a9fd2 1584#ifdef DEBUG_IOPORT
fd872598
FB
1585 { CPU_LOG_IOPORT, "ioport",
1586 "show all i/o ports accesses" },
8e3a9fd2 1587#endif
f193c797
FB
1588 { 0, NULL, NULL },
1589};
1590
1591static int cmp1(const char *s1, int n, const char *s2)
1592{
1593 if (strlen(s2) != n)
1594 return 0;
1595 return memcmp(s1, s2, n) == 0;
1596}
3b46e624 1597
f193c797
FB
1598/* takes a comma separated list of log masks. Return 0 if error. */
1599int cpu_str_to_log_mask(const char *str)
1600{
c7cd6a37 1601 const CPULogItem *item;
f193c797
FB
1602 int mask;
1603 const char *p, *p1;
1604
1605 p = str;
1606 mask = 0;
1607 for(;;) {
1608 p1 = strchr(p, ',');
1609 if (!p1)
1610 p1 = p + strlen(p);
8e3a9fd2
FB
1611 if(cmp1(p,p1-p,"all")) {
1612 for(item = cpu_log_items; item->mask != 0; item++) {
1613 mask |= item->mask;
1614 }
1615 } else {
f193c797
FB
1616 for(item = cpu_log_items; item->mask != 0; item++) {
1617 if (cmp1(p, p1 - p, item->name))
1618 goto found;
1619 }
1620 return 0;
8e3a9fd2 1621 }
f193c797
FB
1622 found:
1623 mask |= item->mask;
1624 if (*p1 != ',')
1625 break;
1626 p = p1 + 1;
1627 }
1628 return mask;
1629}
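/* Usage sketch: turning a "-d" style option string into a log mask:
       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (mask)
           cpu_set_log(mask);    a return of 0 means an item was not recognized
*/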
ea041c0e 1630
7501267e
FB
1631void cpu_abort(CPUState *env, const char *fmt, ...)
1632{
1633 va_list ap;
493ae1f0 1634 va_list ap2;
7501267e
FB
1635
1636 va_start(ap, fmt);
493ae1f0 1637 va_copy(ap2, ap);
7501267e
FB
1638 fprintf(stderr, "qemu: fatal: ");
1639 vfprintf(stderr, fmt, ap);
1640 fprintf(stderr, "\n");
1641#ifdef TARGET_I386
7fe48483
FB
1642 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1643#else
1644 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1645#endif
93fcfe39
AL
1646 if (qemu_log_enabled()) {
1647 qemu_log("qemu: fatal: ");
1648 qemu_log_vprintf(fmt, ap2);
1649 qemu_log("\n");
f9373291 1650#ifdef TARGET_I386
93fcfe39 1651 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1652#else
93fcfe39 1653 log_cpu_state(env, 0);
f9373291 1654#endif
31b1a7b4 1655 qemu_log_flush();
93fcfe39 1656 qemu_log_close();
924edcae 1657 }
493ae1f0 1658 va_end(ap2);
f9373291 1659 va_end(ap);
7501267e
FB
1660 abort();
1661}
1662
c5be9f08
TS
1663CPUState *cpu_copy(CPUState *env)
1664{
01ba9816 1665 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1666 CPUState *next_cpu = new_env->next_cpu;
1667 int cpu_index = new_env->cpu_index;
5a38f081
AL
1668#if defined(TARGET_HAS_ICE)
1669 CPUBreakpoint *bp;
1670 CPUWatchpoint *wp;
1671#endif
1672
c5be9f08 1673 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1674
1675 /* Preserve chaining and index. */
c5be9f08
TS
1676 new_env->next_cpu = next_cpu;
1677 new_env->cpu_index = cpu_index;
5a38f081
AL
1678
1679 /* Clone all break/watchpoints.
1680 Note: Once we support ptrace with hw-debug register access, make sure
1681 BP_CPU break/watchpoints are handled correctly on clone. */
1682 TAILQ_INIT(&env->breakpoints);
1683 TAILQ_INIT(&env->watchpoints);
1684#if defined(TARGET_HAS_ICE)
1685 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1686 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1687 }
1688 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1689 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1690 wp->flags, NULL);
1691 }
1692#endif
1693
c5be9f08
TS
1694 return new_env;
1695}
1696
0124311e
FB
1697#if !defined(CONFIG_USER_ONLY)
1698
5c751e99
EI
1699static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1700{
1701 unsigned int i;
1702
1703 /* Discard jump cache entries for any tb which might potentially
1704 overlap the flushed page. */
1705 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1706 memset (&env->tb_jmp_cache[i], 0,
1707 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1708
1709 i = tb_jmp_cache_hash_page(addr);
1710 memset (&env->tb_jmp_cache[i], 0,
1711 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1712}
1713
ee8b7021
FB
1714/* NOTE: if flush_global is true, also flush global entries (not
1715 implemented yet) */
1716void tlb_flush(CPUState *env, int flush_global)
33417e70 1717{
33417e70 1718 int i;
0124311e 1719
9fa3e853
FB
1720#if defined(DEBUG_TLB)
1721 printf("tlb_flush:\n");
1722#endif
0124311e
FB
1723 /* must reset current TB so that interrupts cannot modify the
1724 links while we are modifying them */
1725 env->current_tb = NULL;
1726
33417e70 1727 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1728 env->tlb_table[0][i].addr_read = -1;
1729 env->tlb_table[0][i].addr_write = -1;
1730 env->tlb_table[0][i].addr_code = -1;
1731 env->tlb_table[1][i].addr_read = -1;
1732 env->tlb_table[1][i].addr_write = -1;
1733 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1734#if (NB_MMU_MODES >= 3)
1735 env->tlb_table[2][i].addr_read = -1;
1736 env->tlb_table[2][i].addr_write = -1;
1737 env->tlb_table[2][i].addr_code = -1;
1738#if (NB_MMU_MODES == 4)
1739 env->tlb_table[3][i].addr_read = -1;
1740 env->tlb_table[3][i].addr_write = -1;
1741 env->tlb_table[3][i].addr_code = -1;
1742#endif
1743#endif
33417e70 1744 }
9fa3e853 1745
8a40a180 1746 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1747
0a962c02
FB
1748#ifdef USE_KQEMU
1749 if (env->kqemu_enabled) {
1750 kqemu_flush(env, flush_global);
1751 }
9fa3e853 1752#endif
e3db7226 1753 tlb_flush_count++;
33417e70
FB
1754}
1755
274da6b2 1756static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1757{
5fafdf24 1758 if (addr == (tlb_entry->addr_read &
84b7b8e7 1759 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1760 addr == (tlb_entry->addr_write &
84b7b8e7 1761 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1762 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1763 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1764 tlb_entry->addr_read = -1;
1765 tlb_entry->addr_write = -1;
1766 tlb_entry->addr_code = -1;
1767 }
61382a50
FB
1768}
1769
2e12669a 1770void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1771{
8a40a180 1772 int i;
0124311e 1773
9fa3e853 1774#if defined(DEBUG_TLB)
108c49b8 1775 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1776#endif
0124311e
FB
1777 /* must reset current TB so that interrupts cannot modify the
1778 links while we are modifying them */
1779 env->current_tb = NULL;
61382a50
FB
1780
1781 addr &= TARGET_PAGE_MASK;
1782 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1783 tlb_flush_entry(&env->tlb_table[0][i], addr);
1784 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1785#if (NB_MMU_MODES >= 3)
1786 tlb_flush_entry(&env->tlb_table[2][i], addr);
1787#if (NB_MMU_MODES == 4)
1788 tlb_flush_entry(&env->tlb_table[3][i], addr);
1789#endif
1790#endif
0124311e 1791
5c751e99 1792 tlb_flush_jmp_cache(env, addr);
9fa3e853 1793
0a962c02
FB
1794#ifdef USE_KQEMU
1795 if (env->kqemu_enabled) {
1796 kqemu_flush_page(env, addr);
1797 }
1798#endif
9fa3e853
FB
1799}
1800
9fa3e853
FB
1801/* update the TLBs so that writes to code in the virtual page 'addr'
1802 can be detected */
6a00d601 1803static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1804{
5fafdf24 1805 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1806 ram_addr + TARGET_PAGE_SIZE,
1807 CODE_DIRTY_FLAG);
9fa3e853
FB
1808}
1809
9fa3e853 1810/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1811 tested for self-modifying code */
5fafdf24 1812static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1813 target_ulong vaddr)
9fa3e853 1814{
3a7d929e 1815 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1816}
1817
5fafdf24 1818static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1819 unsigned long start, unsigned long length)
1820{
1821 unsigned long addr;
84b7b8e7
FB
1822 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1823 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1824 if ((addr - start) < length) {
0f459d16 1825 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1826 }
1827 }
1828}
1829
3a7d929e 1830void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1831 int dirty_flags)
1ccde1cb
FB
1832{
1833 CPUState *env;
4f2ac237 1834 unsigned long length, start1;
0a962c02
FB
1835 int i, mask, len;
1836 uint8_t *p;
1ccde1cb
FB
1837
1838 start &= TARGET_PAGE_MASK;
1839 end = TARGET_PAGE_ALIGN(end);
1840
1841 length = end - start;
1842 if (length == 0)
1843 return;
0a962c02 1844 len = length >> TARGET_PAGE_BITS;
3a7d929e 1845#ifdef USE_KQEMU
6a00d601
FB
1846 /* XXX: should not depend on cpu context */
1847 env = first_cpu;
3a7d929e 1848 if (env->kqemu_enabled) {
f23db169
FB
1849 ram_addr_t addr;
1850 addr = start;
1851 for(i = 0; i < len; i++) {
1852 kqemu_set_notdirty(env, addr);
1853 addr += TARGET_PAGE_SIZE;
1854 }
3a7d929e
FB
1855 }
1856#endif
f23db169
FB
1857 mask = ~dirty_flags;
1858 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1859 for(i = 0; i < len; i++)
1860 p[i] &= mask;
1861
1ccde1cb
FB
1862 /* we modify the TLB cache so that the dirty bit will be set again
1863 when accessing the range */
59817ccb 1864 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1865 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1866 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1867 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1868 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1869 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1870#if (NB_MMU_MODES >= 3)
1871 for(i = 0; i < CPU_TLB_SIZE; i++)
1872 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1873#if (NB_MMU_MODES == 4)
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1876#endif
1877#endif
6a00d601 1878 }
1ccde1cb
FB
1879}
1880
74576198
AL
1881int cpu_physical_memory_set_dirty_tracking(int enable)
1882{
1883 in_migration = enable;
1884 return 0;
1885}
1886
1887int cpu_physical_memory_get_dirty_tracking(void)
1888{
1889 return in_migration;
1890}
1891
2bec46dc
AL
1892void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1893{
1894 if (kvm_enabled())
1895 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1896}
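
/* Minimal usage sketch: a live-migration style caller could drive the
 * dirty-tracking hooks above roughly like this.  The function name and
 * the 'ram_size' argument are hypothetical placeholders, not part of the
 * real migration code. */
static void hypothetical_sync_ram_dirty_log(ram_addr_t ram_size)
{
    /* mark dirty tracking active (recorded in in_migration above) */
    cpu_physical_memory_set_dirty_tracking(1);

    /* fold the accelerator's dirty log into phys_ram_dirty */
    cpu_physical_sync_dirty_bitmap(0, ram_size);

    /* stop tracking once the final RAM pass is done */
    if (cpu_physical_memory_get_dirty_tracking())
        cpu_physical_memory_set_dirty_tracking(0);
}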
1897
3a7d929e
FB
1898static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1899{
1900 ram_addr_t ram_addr;
1901
84b7b8e7 1902 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1903 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1904 tlb_entry->addend - (unsigned long)phys_ram_base;
1905 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1906 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1907 }
1908 }
1909}
1910
1911/* update the TLB according to the current state of the dirty bits */
1912void cpu_tlb_update_dirty(CPUState *env)
1913{
1914 int i;
1915 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1916 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1917 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1918 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1919#if (NB_MMU_MODES >= 3)
1920 for(i = 0; i < CPU_TLB_SIZE; i++)
1921 tlb_update_dirty(&env->tlb_table[2][i]);
1922#if (NB_MMU_MODES == 4)
1923 for(i = 0; i < CPU_TLB_SIZE; i++)
1924 tlb_update_dirty(&env->tlb_table[3][i]);
1925#endif
1926#endif
3a7d929e
FB
1927}
1928
0f459d16 1929static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1930{
0f459d16
PB
1931 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1932 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1933}
1934
0f459d16
PB
1935/* update the TLB corresponding to virtual page vaddr
1936 so that it is no longer dirty */
1937static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1938{
1ccde1cb
FB
1939 int i;
1940
0f459d16 1941 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1942 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1943 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1944 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1945#if (NB_MMU_MODES >= 3)
0f459d16 1946 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1947#if (NB_MMU_MODES == 4)
0f459d16 1948 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1949#endif
1950#endif
9fa3e853
FB
1951}
1952
59817ccb
FB
1953/* add a new TLB entry. At most one entry for a given virtual address
1954 is permitted. Return 0 if OK or 2 if the page could not be mapped
1955 (can only happen in non SOFTMMU mode for I/O pages or pages
1956 conflicting with the host address space). */
5fafdf24
TS
1957int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958 target_phys_addr_t paddr, int prot,
6ebbf390 1959 int mmu_idx, int is_softmmu)
9fa3e853 1960{
92e873b9 1961 PhysPageDesc *p;
4f2ac237 1962 unsigned long pd;
9fa3e853 1963 unsigned int index;
4f2ac237 1964 target_ulong address;
0f459d16 1965 target_ulong code_address;
108c49b8 1966 target_phys_addr_t addend;
9fa3e853 1967 int ret;
84b7b8e7 1968 CPUTLBEntry *te;
a1d1bb31 1969 CPUWatchpoint *wp;
0f459d16 1970 target_phys_addr_t iotlb;
9fa3e853 1971
92e873b9 1972 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1973 if (!p) {
1974 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1975 } else {
1976 pd = p->phys_offset;
9fa3e853
FB
1977 }
1978#if defined(DEBUG_TLB)
6ebbf390
JM
1979 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1980 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1981#endif
1982
1983 ret = 0;
0f459d16
PB
1984 address = vaddr;
1985 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1986 /* IO memory case (romd handled later) */
1987 address |= TLB_MMIO;
1988 }
1989 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1990 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1991 /* Normal RAM. */
1992 iotlb = pd & TARGET_PAGE_MASK;
1993 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1994 iotlb |= IO_MEM_NOTDIRTY;
1995 else
1996 iotlb |= IO_MEM_ROM;
1997 } else {
 1998 /* IO handlers are currently passed a physical address.
1999 It would be nice to pass an offset from the base address
2000 of that region. This would avoid having to special case RAM,
2001 and avoid full address decoding in every device.
2002 We can't use the high bits of pd for this because
2003 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2004 iotlb = (pd & ~TARGET_PAGE_MASK);
2005 if (p) {
8da3ff18
PB
2006 iotlb += p->region_offset;
2007 } else {
2008 iotlb += paddr;
2009 }
0f459d16
PB
2010 }
2011
2012 code_address = address;
2013 /* Make accesses to pages with watchpoints go via the
2014 watchpoint trap routines. */
c0ce998e 2015 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2016 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2017 iotlb = io_mem_watch + paddr;
2018 /* TODO: The memory case can be optimized by not trapping
2019 reads of pages with a write breakpoint. */
2020 address |= TLB_MMIO;
6658ffb8 2021 }
0f459d16 2022 }
d79acba4 2023
0f459d16
PB
2024 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2025 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2026 te = &env->tlb_table[mmu_idx][index];
2027 te->addend = addend - vaddr;
2028 if (prot & PAGE_READ) {
2029 te->addr_read = address;
2030 } else {
2031 te->addr_read = -1;
2032 }
5c751e99 2033
0f459d16
PB
2034 if (prot & PAGE_EXEC) {
2035 te->addr_code = code_address;
2036 } else {
2037 te->addr_code = -1;
2038 }
2039 if (prot & PAGE_WRITE) {
2040 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2041 (pd & IO_MEM_ROMD)) {
2042 /* Write access calls the I/O callback. */
2043 te->addr_write = address | TLB_MMIO;
2044 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2045 !cpu_physical_memory_is_dirty(pd)) {
2046 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2047 } else {
0f459d16 2048 te->addr_write = address;
9fa3e853 2049 }
0f459d16
PB
2050 } else {
2051 te->addr_write = -1;
9fa3e853 2052 }
9fa3e853
FB
2053 return ret;
2054}
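
/* Minimal usage sketch: a target's MMU fault handler (the tlb_fill path)
 * resolves the guest-virtual address itself and then installs the mapping
 * with tlb_set_page_exec().  The identity translation and the function
 * name below are hypothetical placeholders. */
static int hypothetical_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK; /* placeholder translation */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr,
                             prot, mmu_idx, 1 /* is_softmmu */);
}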
2055
0124311e
FB
2056#else
2057
ee8b7021 2058void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2059{
2060}
2061
2e12669a 2062void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2063{
2064}
2065
5fafdf24
TS
2066int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2067 target_phys_addr_t paddr, int prot,
6ebbf390 2068 int mmu_idx, int is_softmmu)
9fa3e853
FB
2069{
2070 return 0;
2071}
0124311e 2072
9fa3e853
FB
2073/* dump memory mappings */
2074void page_dump(FILE *f)
33417e70 2075{
9fa3e853
FB
2076 unsigned long start, end;
2077 int i, j, prot, prot1;
2078 PageDesc *p;
33417e70 2079
9fa3e853
FB
2080 fprintf(f, "%-8s %-8s %-8s %s\n",
2081 "start", "end", "size", "prot");
2082 start = -1;
2083 end = -1;
2084 prot = 0;
2085 for(i = 0; i <= L1_SIZE; i++) {
2086 if (i < L1_SIZE)
2087 p = l1_map[i];
2088 else
2089 p = NULL;
2090 for(j = 0;j < L2_SIZE; j++) {
2091 if (!p)
2092 prot1 = 0;
2093 else
2094 prot1 = p[j].flags;
2095 if (prot1 != prot) {
2096 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097 if (start != -1) {
2098 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2099 start, end, end - start,
9fa3e853
FB
2100 prot & PAGE_READ ? 'r' : '-',
2101 prot & PAGE_WRITE ? 'w' : '-',
2102 prot & PAGE_EXEC ? 'x' : '-');
2103 }
2104 if (prot1 != 0)
2105 start = end;
2106 else
2107 start = -1;
2108 prot = prot1;
2109 }
2110 if (!p)
2111 break;
2112 }
33417e70 2113 }
33417e70
FB
2114}
2115
53a5960a 2116int page_get_flags(target_ulong address)
33417e70 2117{
9fa3e853
FB
2118 PageDesc *p;
2119
2120 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2121 if (!p)
9fa3e853
FB
2122 return 0;
2123 return p->flags;
2124}
2125
2126/* modify the flags of a page and invalidate the code if
 2127 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2128 depending on PAGE_WRITE */
53a5960a 2129void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2130{
2131 PageDesc *p;
53a5960a 2132 target_ulong addr;
9fa3e853 2133
c8a706fe 2134 /* mmap_lock should already be held. */
9fa3e853
FB
2135 start = start & TARGET_PAGE_MASK;
2136 end = TARGET_PAGE_ALIGN(end);
2137 if (flags & PAGE_WRITE)
2138 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2139 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2140 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2141 /* We may be called for host regions that are outside guest
2142 address space. */
2143 if (!p)
2144 return;
9fa3e853
FB
2145 /* if the write protection is set, then we invalidate the code
2146 inside */
5fafdf24 2147 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2148 (flags & PAGE_WRITE) &&
2149 p->first_tb) {
d720b93d 2150 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2151 }
2152 p->flags = flags;
2153 }
33417e70
FB
2154}
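
/* Minimal usage sketch: user-mode emulation code (for instance a target
 * mmap handler) records a freshly mapped guest range like this so that
 * page_get_flags()/page_check_range() see it.  The function name and the
 * protection bits are hypothetical placeholders. */
static void hypothetical_record_guest_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(start + len),
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}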
2155
3d97b40b
TS
2156int page_check_range(target_ulong start, target_ulong len, int flags)
2157{
2158 PageDesc *p;
2159 target_ulong end;
2160 target_ulong addr;
2161
55f280c9
AZ
2162 if (start + len < start)
2163 /* we've wrapped around */
2164 return -1;
2165
3d97b40b
TS
 2166 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2167 start = start & TARGET_PAGE_MASK;
2168
3d97b40b
TS
2169 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2170 p = page_find(addr >> TARGET_PAGE_BITS);
2171 if( !p )
2172 return -1;
2173 if( !(p->flags & PAGE_VALID) )
2174 return -1;
2175
dae3270c 2176 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2177 return -1;
dae3270c
FB
2178 if (flags & PAGE_WRITE) {
2179 if (!(p->flags & PAGE_WRITE_ORG))
2180 return -1;
2181 /* unprotect the page if it was put read-only because it
2182 contains translated code */
2183 if (!(p->flags & PAGE_WRITE)) {
2184 if (!page_unprotect(addr, 0, NULL))
2185 return -1;
2186 }
2187 return 0;
2188 }
3d97b40b
TS
2189 }
2190 return 0;
2191}
2192
9fa3e853
FB
2193/* called from signal handler: invalidate the code and unprotect the
 2194 page. Return TRUE if the fault was successfully handled. */
53a5960a 2195int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2196{
2197 unsigned int page_index, prot, pindex;
2198 PageDesc *p, *p1;
53a5960a 2199 target_ulong host_start, host_end, addr;
9fa3e853 2200
c8a706fe
PB
2201 /* Technically this isn't safe inside a signal handler. However we
2202 know this only ever happens in a synchronous SEGV handler, so in
2203 practice it seems to be ok. */
2204 mmap_lock();
2205
83fb7adf 2206 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2207 page_index = host_start >> TARGET_PAGE_BITS;
2208 p1 = page_find(page_index);
c8a706fe
PB
2209 if (!p1) {
2210 mmap_unlock();
9fa3e853 2211 return 0;
c8a706fe 2212 }
83fb7adf 2213 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2214 p = p1;
2215 prot = 0;
2216 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2217 prot |= p->flags;
2218 p++;
2219 }
2220 /* if the page was really writable, then we change its
2221 protection back to writable */
2222 if (prot & PAGE_WRITE_ORG) {
2223 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2224 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2225 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2226 (prot & PAGE_BITS) | PAGE_WRITE);
2227 p1[pindex].flags |= PAGE_WRITE;
2228 /* and since the content will be modified, we must invalidate
2229 the corresponding translated code. */
d720b93d 2230 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2231#ifdef DEBUG_TB_CHECK
2232 tb_invalidate_check(address);
2233#endif
c8a706fe 2234 mmap_unlock();
9fa3e853
FB
2235 return 1;
2236 }
2237 }
c8a706fe 2238 mmap_unlock();
9fa3e853
FB
2239 return 0;
2240}
2241
6a00d601
FB
2242static inline void tlb_set_dirty(CPUState *env,
2243 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2244{
2245}
9fa3e853
FB
2246#endif /* defined(CONFIG_USER_ONLY) */
2247
e2eef170 2248#if !defined(CONFIG_USER_ONLY)
8da3ff18 2249
db7b5426 2250static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2251 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2252static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2253 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2254#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2255 need_subpage) \
2256 do { \
2257 if (addr > start_addr) \
2258 start_addr2 = 0; \
2259 else { \
2260 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2261 if (start_addr2 > 0) \
2262 need_subpage = 1; \
2263 } \
2264 \
49e9fba2 2265 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2266 end_addr2 = TARGET_PAGE_SIZE - 1; \
2267 else { \
2268 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2269 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2270 need_subpage = 1; \
2271 } \
2272 } while (0)
2273
33417e70
FB
2274/* register physical memory. 'size' must be a multiple of the target
2275 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2276 io memory page. The address used when calling the IO function is
2277 the offset from the start of the region, plus region_offset. Both
 2278 start_addr and region_offset are rounded down to a page boundary
2279 before calculating this offset. This should not be a problem unless
2280 the low bits of start_addr and region_offset differ. */
2281void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2282 ram_addr_t size,
2283 ram_addr_t phys_offset,
2284 ram_addr_t region_offset)
33417e70 2285{
108c49b8 2286 target_phys_addr_t addr, end_addr;
92e873b9 2287 PhysPageDesc *p;
9d42037b 2288 CPUState *env;
00f82b8a 2289 ram_addr_t orig_size = size;
db7b5426 2290 void *subpage;
33417e70 2291
da260249
FB
2292#ifdef USE_KQEMU
2293 /* XXX: should not depend on cpu context */
2294 env = first_cpu;
2295 if (env->kqemu_enabled) {
2296 kqemu_set_phys_mem(start_addr, size, phys_offset);
2297 }
2298#endif
7ba1e619
AL
2299 if (kvm_enabled())
2300 kvm_set_phys_mem(start_addr, size, phys_offset);
2301
67c4d23c
PB
2302 if (phys_offset == IO_MEM_UNASSIGNED) {
2303 region_offset = start_addr;
2304 }
8da3ff18 2305 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2306 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2307 end_addr = start_addr + (target_phys_addr_t)size;
2308 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2309 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2310 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2311 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2312 target_phys_addr_t start_addr2, end_addr2;
2313 int need_subpage = 0;
2314
2315 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2316 need_subpage);
4254fab8 2317 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2318 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2319 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2320 &p->phys_offset, orig_memory,
2321 p->region_offset);
db7b5426
BS
2322 } else {
2323 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2324 >> IO_MEM_SHIFT];
2325 }
8da3ff18
PB
2326 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2327 region_offset);
2328 p->region_offset = 0;
db7b5426
BS
2329 } else {
2330 p->phys_offset = phys_offset;
2331 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2332 (phys_offset & IO_MEM_ROMD))
2333 phys_offset += TARGET_PAGE_SIZE;
2334 }
2335 } else {
2336 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2337 p->phys_offset = phys_offset;
8da3ff18 2338 p->region_offset = region_offset;
db7b5426 2339 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2340 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2341 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2342 } else {
db7b5426
BS
2343 target_phys_addr_t start_addr2, end_addr2;
2344 int need_subpage = 0;
2345
2346 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2347 end_addr2, need_subpage);
2348
4254fab8 2349 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2350 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2351 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2352 addr & TARGET_PAGE_MASK);
db7b5426 2353 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2354 phys_offset, region_offset);
2355 p->region_offset = 0;
db7b5426
BS
2356 }
2357 }
2358 }
8da3ff18 2359 region_offset += TARGET_PAGE_SIZE;
33417e70 2360 }
3b46e624 2361
9d42037b
FB
2362 /* since each CPU stores ram addresses in its TLB cache, we must
2363 reset the modified entries */
2364 /* XXX: slow ! */
2365 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2366 tlb_flush(env, 1);
2367 }
33417e70
FB
2368}
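
/* Minimal usage sketch of the region_offset parameter: map one page of a
 * hypothetical device at guest physical 0x10000000, where 'dev_io_mem' is
 * the token returned by cpu_register_io_memory().  With a region_offset of
 * TARGET_PAGE_SIZE the device callbacks see addresses starting at
 * TARGET_PAGE_SIZE instead of 0.  Address and names are placeholders. */
static void hypothetical_map_device_page(int dev_io_mem)
{
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        dev_io_mem, TARGET_PAGE_SIZE);
}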
2369
ba863458 2370/* XXX: temporary until new memory mapping API */
00f82b8a 2371ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2372{
2373 PhysPageDesc *p;
2374
2375 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2376 if (!p)
2377 return IO_MEM_UNASSIGNED;
2378 return p->phys_offset;
2379}
2380
f65ed4c1
AL
2381void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2382{
2383 if (kvm_enabled())
2384 kvm_coalesce_mmio_region(addr, size);
2385}
2386
2387void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2388{
2389 if (kvm_enabled())
2390 kvm_uncoalesce_mmio_region(addr, size);
2391}
2392
e9a1ab19 2393/* XXX: better than nothing */
00f82b8a 2394ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2395{
2396 ram_addr_t addr;
7fb4fdcf 2397 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2398 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2399 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2400 abort();
2401 }
2402 addr = phys_ram_alloc_offset;
2403 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2404 return addr;
2405}
2406
2407void qemu_ram_free(ram_addr_t addr)
2408{
2409}
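
/* Minimal usage sketch: a board model typically allocates its guest RAM
 * from the phys_ram_base pool and then makes it visible in the physical
 * address map.  The base address 0 and the function name are placeholders. */
static void hypothetical_init_board_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    cpu_register_physical_memory_offset(0, ram_size,
                                        ram_offset | IO_MEM_RAM, 0);
}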
2410
a4193c8a 2411static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2412{
67d3b957 2413#ifdef DEBUG_UNASSIGNED
ab3d1727 2414 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2415#endif
0a6f8a6d 2416#if defined(TARGET_SPARC)
e18231a3
BS
2417 do_unassigned_access(addr, 0, 0, 0, 1);
2418#endif
2419 return 0;
2420}
2421
2422static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2423{
2424#ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2426#endif
0a6f8a6d 2427#if defined(TARGET_SPARC)
e18231a3
BS
2428 do_unassigned_access(addr, 0, 0, 0, 2);
2429#endif
2430 return 0;
2431}
2432
2433static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2434{
2435#ifdef DEBUG_UNASSIGNED
2436 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2437#endif
0a6f8a6d 2438#if defined(TARGET_SPARC)
e18231a3 2439 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2440#endif
33417e70
FB
2441 return 0;
2442}
2443
a4193c8a 2444static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2445{
67d3b957 2446#ifdef DEBUG_UNASSIGNED
ab3d1727 2447 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2448#endif
0a6f8a6d 2449#if defined(TARGET_SPARC)
e18231a3
BS
2450 do_unassigned_access(addr, 1, 0, 0, 1);
2451#endif
2452}
2453
2454static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2455{
2456#ifdef DEBUG_UNASSIGNED
2457 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2458#endif
0a6f8a6d 2459#if defined(TARGET_SPARC)
e18231a3
BS
2460 do_unassigned_access(addr, 1, 0, 0, 2);
2461#endif
2462}
2463
2464static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2465{
2466#ifdef DEBUG_UNASSIGNED
2467 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2468#endif
0a6f8a6d 2469#if defined(TARGET_SPARC)
e18231a3 2470 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2471#endif
33417e70
FB
2472}
2473
2474static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2475 unassigned_mem_readb,
e18231a3
BS
2476 unassigned_mem_readw,
2477 unassigned_mem_readl,
33417e70
FB
2478};
2479
2480static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2481 unassigned_mem_writeb,
e18231a3
BS
2482 unassigned_mem_writew,
2483 unassigned_mem_writel,
33417e70
FB
2484};
2485
0f459d16
PB
2486static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2487 uint32_t val)
9fa3e853 2488{
3a7d929e 2489 int dirty_flags;
3a7d929e
FB
2490 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2491 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2492#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2493 tb_invalidate_phys_page_fast(ram_addr, 1);
2494 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2495#endif
3a7d929e 2496 }
0f459d16 2497 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2498#ifdef USE_KQEMU
2499 if (cpu_single_env->kqemu_enabled &&
2500 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2501 kqemu_modify_page(cpu_single_env, ram_addr);
2502#endif
f23db169
FB
2503 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2504 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2505 /* we remove the notdirty callback only if the code has been
2506 flushed */
2507 if (dirty_flags == 0xff)
2e70f6ef 2508 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2509}
2510
0f459d16
PB
2511static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2512 uint32_t val)
9fa3e853 2513{
3a7d929e 2514 int dirty_flags;
3a7d929e
FB
2515 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2516 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2517#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2518 tb_invalidate_phys_page_fast(ram_addr, 2);
2519 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2520#endif
3a7d929e 2521 }
0f459d16 2522 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2523#ifdef USE_KQEMU
2524 if (cpu_single_env->kqemu_enabled &&
2525 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2526 kqemu_modify_page(cpu_single_env, ram_addr);
2527#endif
f23db169
FB
2528 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2529 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2530 /* we remove the notdirty callback only if the code has been
2531 flushed */
2532 if (dirty_flags == 0xff)
2e70f6ef 2533 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2534}
2535
0f459d16
PB
2536static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2537 uint32_t val)
9fa3e853 2538{
3a7d929e 2539 int dirty_flags;
3a7d929e
FB
2540 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2541 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2542#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2543 tb_invalidate_phys_page_fast(ram_addr, 4);
2544 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2545#endif
3a7d929e 2546 }
0f459d16 2547 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2548#ifdef USE_KQEMU
2549 if (cpu_single_env->kqemu_enabled &&
2550 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2551 kqemu_modify_page(cpu_single_env, ram_addr);
2552#endif
f23db169
FB
2553 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2554 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2555 /* we remove the notdirty callback only if the code has been
2556 flushed */
2557 if (dirty_flags == 0xff)
2e70f6ef 2558 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2559}
2560
3a7d929e 2561static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2562 NULL, /* never used */
2563 NULL, /* never used */
2564 NULL, /* never used */
2565};
2566
1ccde1cb
FB
2567static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2568 notdirty_mem_writeb,
2569 notdirty_mem_writew,
2570 notdirty_mem_writel,
2571};
2572
0f459d16 2573/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2574static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2575{
2576 CPUState *env = cpu_single_env;
06d55cc1
AL
2577 target_ulong pc, cs_base;
2578 TranslationBlock *tb;
0f459d16 2579 target_ulong vaddr;
a1d1bb31 2580 CPUWatchpoint *wp;
06d55cc1 2581 int cpu_flags;
0f459d16 2582
06d55cc1
AL
2583 if (env->watchpoint_hit) {
2584 /* We re-entered the check after replacing the TB. Now raise
 2585 * the debug interrupt so that it will trigger after the
2586 * current instruction. */
2587 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2588 return;
2589 }
2e70f6ef 2590 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2591 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2592 if ((vaddr == (wp->vaddr & len_mask) ||
2593 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2594 wp->flags |= BP_WATCHPOINT_HIT;
2595 if (!env->watchpoint_hit) {
2596 env->watchpoint_hit = wp;
2597 tb = tb_find_pc(env->mem_io_pc);
2598 if (!tb) {
2599 cpu_abort(env, "check_watchpoint: could not find TB for "
2600 "pc=%p", (void *)env->mem_io_pc);
2601 }
2602 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2603 tb_phys_invalidate(tb, -1);
2604 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2605 env->exception_index = EXCP_DEBUG;
2606 } else {
2607 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2608 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2609 }
2610 cpu_resume_from_signal(env, NULL);
06d55cc1 2611 }
6e140f28
AL
2612 } else {
2613 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2614 }
2615 }
2616}
2617
6658ffb8
PB
2618/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2619 so these check for a hit then pass through to the normal out-of-line
2620 phys routines. */
2621static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2622{
b4051334 2623 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2624 return ldub_phys(addr);
2625}
2626
2627static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2628{
b4051334 2629 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2630 return lduw_phys(addr);
2631}
2632
2633static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2634{
b4051334 2635 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2636 return ldl_phys(addr);
2637}
2638
6658ffb8
PB
2639static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2640 uint32_t val)
2641{
b4051334 2642 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2643 stb_phys(addr, val);
2644}
2645
2646static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2647 uint32_t val)
2648{
b4051334 2649 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2650 stw_phys(addr, val);
2651}
2652
2653static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2654 uint32_t val)
2655{
b4051334 2656 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2657 stl_phys(addr, val);
2658}
2659
2660static CPUReadMemoryFunc *watch_mem_read[3] = {
2661 watch_mem_readb,
2662 watch_mem_readw,
2663 watch_mem_readl,
2664};
2665
2666static CPUWriteMemoryFunc *watch_mem_write[3] = {
2667 watch_mem_writeb,
2668 watch_mem_writew,
2669 watch_mem_writel,
2670};
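
/* Minimal usage sketch: a debugger front end arms a 4-byte write
 * watchpoint like this; guest stores to that page are then forced through
 * the watch_mem_* handlers above via the TLB_MMIO trick in
 * tlb_set_page_exec().  The function name and length are placeholders. */
static void hypothetical_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}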
6658ffb8 2671
db7b5426
BS
2672static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2673 unsigned int len)
2674{
db7b5426
BS
2675 uint32_t ret;
2676 unsigned int idx;
2677
8da3ff18 2678 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2679#if defined(DEBUG_SUBPAGE)
2680 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2681 mmio, len, addr, idx);
2682#endif
8da3ff18
PB
2683 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2684 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2685
2686 return ret;
2687}
2688
2689static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2690 uint32_t value, unsigned int len)
2691{
db7b5426
BS
2692 unsigned int idx;
2693
8da3ff18 2694 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2695#if defined(DEBUG_SUBPAGE)
2696 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2697 mmio, len, addr, idx, value);
2698#endif
8da3ff18
PB
2699 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2700 addr + mmio->region_offset[idx][1][len],
2701 value);
db7b5426
BS
2702}
2703
2704static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2705{
2706#if defined(DEBUG_SUBPAGE)
2707 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2708#endif
2709
2710 return subpage_readlen(opaque, addr, 0);
2711}
2712
2713static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2714 uint32_t value)
2715{
2716#if defined(DEBUG_SUBPAGE)
2717 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2718#endif
2719 subpage_writelen(opaque, addr, value, 0);
2720}
2721
2722static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2723{
2724#if defined(DEBUG_SUBPAGE)
2725 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2726#endif
2727
2728 return subpage_readlen(opaque, addr, 1);
2729}
2730
2731static void subpage_writew (void *opaque, target_phys_addr_t addr,
2732 uint32_t value)
2733{
2734#if defined(DEBUG_SUBPAGE)
2735 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2736#endif
2737 subpage_writelen(opaque, addr, value, 1);
2738}
2739
2740static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2741{
2742#if defined(DEBUG_SUBPAGE)
2743 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2744#endif
2745
2746 return subpage_readlen(opaque, addr, 2);
2747}
2748
2749static void subpage_writel (void *opaque,
2750 target_phys_addr_t addr, uint32_t value)
2751{
2752#if defined(DEBUG_SUBPAGE)
2753 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2754#endif
2755 subpage_writelen(opaque, addr, value, 2);
2756}
2757
2758static CPUReadMemoryFunc *subpage_read[] = {
2759 &subpage_readb,
2760 &subpage_readw,
2761 &subpage_readl,
2762};
2763
2764static CPUWriteMemoryFunc *subpage_write[] = {
2765 &subpage_writeb,
2766 &subpage_writew,
2767 &subpage_writel,
2768};
2769
2770static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2771 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2772{
2773 int idx, eidx;
4254fab8 2774 unsigned int i;
db7b5426
BS
2775
2776 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2777 return -1;
2778 idx = SUBPAGE_IDX(start);
2779 eidx = SUBPAGE_IDX(end);
2780#if defined(DEBUG_SUBPAGE)
2781 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2782 mmio, start, end, idx, eidx, memory);
2783#endif
2784 memory >>= IO_MEM_SHIFT;
2785 for (; idx <= eidx; idx++) {
4254fab8 2786 for (i = 0; i < 4; i++) {
3ee89922
BS
2787 if (io_mem_read[memory][i]) {
2788 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2789 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2790 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2791 }
2792 if (io_mem_write[memory][i]) {
2793 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2794 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2795 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2796 }
4254fab8 2797 }
db7b5426
BS
2798 }
2799
2800 return 0;
2801}
2802
00f82b8a 2803static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2804 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2805{
2806 subpage_t *mmio;
2807 int subpage_memory;
2808
2809 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2810
2811 mmio->base = base;
2812 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2813#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2814 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2815 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2816#endif
1eec614b
AL
2817 *phys = subpage_memory | IO_MEM_SUBPAGE;
2818 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2819 region_offset);
db7b5426
BS
2820
2821 return mmio;
2822}
2823
88715657
AL
2824static int get_free_io_mem_idx(void)
2825{
2826 int i;
2827
2828 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2829 if (!io_mem_used[i]) {
2830 io_mem_used[i] = 1;
2831 return i;
2832 }
2833
2834 return -1;
2835}
2836
33417e70
FB
2837static void io_mem_init(void)
2838{
88715657
AL
2839 int i;
2840
3a7d929e 2841 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2842 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2843 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2844 for (i=0; i<5; i++)
2845 io_mem_used[i] = 1;
1ccde1cb 2846
0f459d16 2847 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2848 watch_mem_write, NULL);
1ccde1cb 2849 /* alloc dirty bits array */
0a962c02 2850 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2851 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2852}
2853
2854/* mem_read and mem_write are arrays of functions containing the
2855 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2856 2). Functions can be omitted with a NULL function pointer. The
2857 registered functions may be modified dynamically later.
 2858 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2859 modified. If it is zero, a new io zone is allocated. The return
2860 value can be used with cpu_register_physical_memory(). (-1) is
 2861 returned on error. */
33417e70
FB
2862int cpu_register_io_memory(int io_index,
2863 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2864 CPUWriteMemoryFunc **mem_write,
2865 void *opaque)
33417e70 2866{
4254fab8 2867 int i, subwidth = 0;
33417e70
FB
2868
2869 if (io_index <= 0) {
88715657
AL
2870 io_index = get_free_io_mem_idx();
2871 if (io_index == -1)
2872 return io_index;
33417e70
FB
2873 } else {
2874 if (io_index >= IO_MEM_NB_ENTRIES)
2875 return -1;
2876 }
b5ff1b31 2877
33417e70 2878 for(i = 0;i < 3; i++) {
4254fab8
BS
2879 if (!mem_read[i] || !mem_write[i])
2880 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2881 io_mem_read[io_index][i] = mem_read[i];
2882 io_mem_write[io_index][i] = mem_write[i];
2883 }
a4193c8a 2884 io_mem_opaque[io_index] = opaque;
4254fab8 2885 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2886}
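
/* Minimal usage sketch: a device model registers 32-bit-only MMIO
 * callbacks and gets back a phys_offset token usable with
 * cpu_register_physical_memory_offset().  All hypothetical_* names are
 * placeholders; leaving the byte/word slots NULL flags the region as
 * subwidth (IO_MEM_SUBWIDTH). */
static uint32_t hypothetical_reg_readl(void *opaque, target_phys_addr_t addr)
{
    /* placeholder: decode 'addr' against the device's register file */
    return 0;
}

static void hypothetical_reg_writel(void *opaque, target_phys_addr_t addr,
                                    uint32_t val)
{
    /* placeholder: latch 'val' into the register selected by 'addr' */
}

static CPUReadMemoryFunc *hypothetical_reg_read[3] = {
    NULL,                       /* no byte access */
    NULL,                       /* no word access */
    hypothetical_reg_readl,     /* 32-bit access */
};

static CPUWriteMemoryFunc *hypothetical_reg_write[3] = {
    NULL,
    NULL,
    hypothetical_reg_writel,
};

static int hypothetical_register_device_mmio(void *dev_state)
{
    /* io_index 0 requests a fresh slot from get_free_io_mem_idx() */
    return cpu_register_io_memory(0, hypothetical_reg_read,
                                  hypothetical_reg_write, dev_state);
}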
61382a50 2887
88715657
AL
2888void cpu_unregister_io_memory(int io_table_address)
2889{
2890 int i;
2891 int io_index = io_table_address >> IO_MEM_SHIFT;
2892
2893 for (i=0;i < 3; i++) {
2894 io_mem_read[io_index][i] = unassigned_mem_read[i];
2895 io_mem_write[io_index][i] = unassigned_mem_write[i];
2896 }
2897 io_mem_opaque[io_index] = NULL;
2898 io_mem_used[io_index] = 0;
2899}
2900
8926b517
FB
2901CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2902{
2903 return io_mem_write[io_index >> IO_MEM_SHIFT];
2904}
2905
2906CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2907{
2908 return io_mem_read[io_index >> IO_MEM_SHIFT];
2909}
2910
e2eef170
PB
2911#endif /* !defined(CONFIG_USER_ONLY) */
2912
13eb76e0
FB
2913/* physical memory access (slow version, mainly for debug) */
2914#if defined(CONFIG_USER_ONLY)
5fafdf24 2915void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2916 int len, int is_write)
2917{
2918 int l, flags;
2919 target_ulong page;
53a5960a 2920 void * p;
13eb76e0
FB
2921
2922 while (len > 0) {
2923 page = addr & TARGET_PAGE_MASK;
2924 l = (page + TARGET_PAGE_SIZE) - addr;
2925 if (l > len)
2926 l = len;
2927 flags = page_get_flags(page);
2928 if (!(flags & PAGE_VALID))
2929 return;
2930 if (is_write) {
2931 if (!(flags & PAGE_WRITE))
2932 return;
579a97f7 2933 /* XXX: this code should not depend on lock_user */
72fb7daa 2934 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2935 /* FIXME - should this return an error rather than just fail? */
2936 return;
72fb7daa
AJ
2937 memcpy(p, buf, l);
2938 unlock_user(p, addr, l);
13eb76e0
FB
2939 } else {
2940 if (!(flags & PAGE_READ))
2941 return;
579a97f7 2942 /* XXX: this code should not depend on lock_user */
72fb7daa 2943 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2944 /* FIXME - should this return an error rather than just fail? */
2945 return;
72fb7daa 2946 memcpy(buf, p, l);
5b257578 2947 unlock_user(p, addr, 0);
13eb76e0
FB
2948 }
2949 len -= l;
2950 buf += l;
2951 addr += l;
2952 }
2953}
8df1cd07 2954
13eb76e0 2955#else
5fafdf24 2956void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2957 int len, int is_write)
2958{
2959 int l, io_index;
2960 uint8_t *ptr;
2961 uint32_t val;
2e12669a
FB
2962 target_phys_addr_t page;
2963 unsigned long pd;
92e873b9 2964 PhysPageDesc *p;
3b46e624 2965
13eb76e0
FB
2966 while (len > 0) {
2967 page = addr & TARGET_PAGE_MASK;
2968 l = (page + TARGET_PAGE_SIZE) - addr;
2969 if (l > len)
2970 l = len;
92e873b9 2971 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2972 if (!p) {
2973 pd = IO_MEM_UNASSIGNED;
2974 } else {
2975 pd = p->phys_offset;
2976 }
3b46e624 2977
13eb76e0 2978 if (is_write) {
3a7d929e 2979 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2980 target_phys_addr_t addr1 = addr;
13eb76e0 2981 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2982 if (p)
6c2934db 2983 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2984 /* XXX: could force cpu_single_env to NULL to avoid
2985 potential bugs */
6c2934db 2986 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2987 /* 32 bit write access */
c27004ec 2988 val = ldl_p(buf);
6c2934db 2989 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2990 l = 4;
6c2934db 2991 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2992 /* 16 bit write access */
c27004ec 2993 val = lduw_p(buf);
6c2934db 2994 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2995 l = 2;
2996 } else {
1c213d19 2997 /* 8 bit write access */
c27004ec 2998 val = ldub_p(buf);
6c2934db 2999 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3000 l = 1;
3001 }
3002 } else {
b448f2f3
FB
3003 unsigned long addr1;
3004 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3005 /* RAM case */
b448f2f3 3006 ptr = phys_ram_base + addr1;
13eb76e0 3007 memcpy(ptr, buf, l);
3a7d929e
FB
3008 if (!cpu_physical_memory_is_dirty(addr1)) {
3009 /* invalidate code */
3010 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3011 /* set dirty bit */
5fafdf24 3012 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3013 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3014 }
13eb76e0
FB
3015 }
3016 } else {
5fafdf24 3017 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3018 !(pd & IO_MEM_ROMD)) {
6c2934db 3019 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3020 /* I/O case */
3021 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3022 if (p)
6c2934db
AJ
3023 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3024 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3025 /* 32 bit read access */
6c2934db 3026 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3027 stl_p(buf, val);
13eb76e0 3028 l = 4;
6c2934db 3029 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3030 /* 16 bit read access */
6c2934db 3031 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3032 stw_p(buf, val);
13eb76e0
FB
3033 l = 2;
3034 } else {
1c213d19 3035 /* 8 bit read access */
6c2934db 3036 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3037 stb_p(buf, val);
13eb76e0
FB
3038 l = 1;
3039 }
3040 } else {
3041 /* RAM case */
5fafdf24 3042 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3043 (addr & ~TARGET_PAGE_MASK);
3044 memcpy(buf, ptr, l);
3045 }
3046 }
3047 len -= l;
3048 buf += l;
3049 addr += l;
3050 }
3051}
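
/* Minimal usage sketch: device emulation commonly moves small structures
 * to and from guest-physical memory through the helper above (the
 * cpu_physical_memory_read/write wrappers end up in cpu_physical_memory_rw).
 * The descriptor size and the names here are placeholders. */
static void hypothetical_fetch_descriptor(target_phys_addr_t desc_gpa)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
    /* ... interpret the descriptor, possibly update a status byte ... */
    cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
}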
8df1cd07 3052
d0ecd2aa 3053/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3054void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3055 const uint8_t *buf, int len)
3056{
3057 int l;
3058 uint8_t *ptr;
3059 target_phys_addr_t page;
3060 unsigned long pd;
3061 PhysPageDesc *p;
3b46e624 3062
d0ecd2aa
FB
3063 while (len > 0) {
3064 page = addr & TARGET_PAGE_MASK;
3065 l = (page + TARGET_PAGE_SIZE) - addr;
3066 if (l > len)
3067 l = len;
3068 p = phys_page_find(page >> TARGET_PAGE_BITS);
3069 if (!p) {
3070 pd = IO_MEM_UNASSIGNED;
3071 } else {
3072 pd = p->phys_offset;
3073 }
3b46e624 3074
d0ecd2aa 3075 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3076 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3077 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3078 /* do nothing */
3079 } else {
3080 unsigned long addr1;
3081 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3082 /* ROM/RAM case */
3083 ptr = phys_ram_base + addr1;
3084 memcpy(ptr, buf, l);
3085 }
3086 len -= l;
3087 buf += l;
3088 addr += l;
3089 }
3090}
3091
6d16c2f8
AL
3092typedef struct {
3093 void *buffer;
3094 target_phys_addr_t addr;
3095 target_phys_addr_t len;
3096} BounceBuffer;
3097
3098static BounceBuffer bounce;
3099
ba223c29
AL
3100typedef struct MapClient {
3101 void *opaque;
3102 void (*callback)(void *opaque);
3103 LIST_ENTRY(MapClient) link;
3104} MapClient;
3105
3106static LIST_HEAD(map_client_list, MapClient) map_client_list
3107 = LIST_HEAD_INITIALIZER(map_client_list);
3108
3109void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3110{
3111 MapClient *client = qemu_malloc(sizeof(*client));
3112
3113 client->opaque = opaque;
3114 client->callback = callback;
3115 LIST_INSERT_HEAD(&map_client_list, client, link);
3116 return client;
3117}
3118
3119void cpu_unregister_map_client(void *_client)
3120{
3121 MapClient *client = (MapClient *)_client;
3122
3123 LIST_REMOVE(client, link);
3124}
3125
3126static void cpu_notify_map_clients(void)
3127{
3128 MapClient *client;
3129
3130 while (!LIST_EMPTY(&map_client_list)) {
3131 client = LIST_FIRST(&map_client_list);
3132 client->callback(client->opaque);
3133 LIST_REMOVE(client, link);
3134 }
3135}
3136
6d16c2f8
AL
3137/* Map a physical memory region into a host virtual address.
3138 * May map a subset of the requested range, given by and returned in *plen.
3139 * May return NULL if resources needed to perform the mapping are exhausted.
3140 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3141 * Use cpu_register_map_client() to know when retrying the map operation is
3142 * likely to succeed.
6d16c2f8
AL
3143 */
3144void *cpu_physical_memory_map(target_phys_addr_t addr,
3145 target_phys_addr_t *plen,
3146 int is_write)
3147{
3148 target_phys_addr_t len = *plen;
3149 target_phys_addr_t done = 0;
3150 int l;
3151 uint8_t *ret = NULL;
3152 uint8_t *ptr;
3153 target_phys_addr_t page;
3154 unsigned long pd;
3155 PhysPageDesc *p;
3156 unsigned long addr1;
3157
3158 while (len > 0) {
3159 page = addr & TARGET_PAGE_MASK;
3160 l = (page + TARGET_PAGE_SIZE) - addr;
3161 if (l > len)
3162 l = len;
3163 p = phys_page_find(page >> TARGET_PAGE_BITS);
3164 if (!p) {
3165 pd = IO_MEM_UNASSIGNED;
3166 } else {
3167 pd = p->phys_offset;
3168 }
3169
3170 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3171 if (done || bounce.buffer) {
3172 break;
3173 }
3174 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3175 bounce.addr = addr;
3176 bounce.len = l;
3177 if (!is_write) {
3178 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3179 }
3180 ptr = bounce.buffer;
3181 } else {
3182 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3183 ptr = phys_ram_base + addr1;
3184 }
3185 if (!done) {
3186 ret = ptr;
3187 } else if (ret + done != ptr) {
3188 break;
3189 }
3190
3191 len -= l;
3192 addr += l;
3193 done += l;
3194 }
3195 *plen = done;
3196 return ret;
3197}
3198
3199/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3200 * Will also mark the memory as dirty if is_write == 1. access_len gives
3201 * the amount of memory that was actually read or written by the caller.
3202 */
3203void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3204 int is_write, target_phys_addr_t access_len)
3205{
3206 if (buffer != bounce.buffer) {
3207 if (is_write) {
3208 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3209 while (access_len) {
3210 unsigned l;
3211 l = TARGET_PAGE_SIZE;
3212 if (l > access_len)
3213 l = access_len;
3214 if (!cpu_physical_memory_is_dirty(addr1)) {
3215 /* invalidate code */
3216 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3217 /* set dirty bit */
3218 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3219 (0xff & ~CODE_DIRTY_FLAG);
3220 }
3221 addr1 += l;
3222 access_len -= l;
3223 }
3224 }
3225 return;
3226 }
3227 if (is_write) {
3228 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3229 }
3230 qemu_free(bounce.buffer);
3231 bounce.buffer = NULL;
ba223c29 3232 cpu_notify_map_clients();
6d16c2f8 3233}
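
/* Minimal usage sketch of the zero-copy DMA path: map a guest buffer,
 * let the host side fill it in place, then unmap with the byte count
 * actually transferred so the dirty bits get updated.  Falling back to
 * cpu_physical_memory_rw() when the map is truncated or refused is the
 * caller's job.  The function name is a placeholder. */
static void hypothetical_dma_to_guest(target_phys_addr_t addr,
                                      target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host)
        return;     /* resources exhausted: bounce or retry via map client */
    /* ... host device writes plen bytes into 'host' ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}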
d0ecd2aa 3234
8df1cd07
FB
3235/* warning: addr must be aligned */
3236uint32_t ldl_phys(target_phys_addr_t addr)
3237{
3238 int io_index;
3239 uint8_t *ptr;
3240 uint32_t val;
3241 unsigned long pd;
3242 PhysPageDesc *p;
3243
3244 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3245 if (!p) {
3246 pd = IO_MEM_UNASSIGNED;
3247 } else {
3248 pd = p->phys_offset;
3249 }
3b46e624 3250
5fafdf24 3251 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3252 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3253 /* I/O case */
3254 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3255 if (p)
3256 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3257 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3258 } else {
3259 /* RAM case */
5fafdf24 3260 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3261 (addr & ~TARGET_PAGE_MASK);
3262 val = ldl_p(ptr);
3263 }
3264 return val;
3265}
3266
84b7b8e7
FB
3267/* warning: addr must be aligned */
3268uint64_t ldq_phys(target_phys_addr_t addr)
3269{
3270 int io_index;
3271 uint8_t *ptr;
3272 uint64_t val;
3273 unsigned long pd;
3274 PhysPageDesc *p;
3275
3276 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3277 if (!p) {
3278 pd = IO_MEM_UNASSIGNED;
3279 } else {
3280 pd = p->phys_offset;
3281 }
3b46e624 3282
2a4188a3
FB
3283 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3284 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3285 /* I/O case */
3286 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3287 if (p)
3288 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3289#ifdef TARGET_WORDS_BIGENDIAN
3290 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3291 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3292#else
3293 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3294 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3295#endif
3296 } else {
3297 /* RAM case */
5fafdf24 3298 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3299 (addr & ~TARGET_PAGE_MASK);
3300 val = ldq_p(ptr);
3301 }
3302 return val;
3303}
3304
aab33094
FB
3305/* XXX: optimize */
3306uint32_t ldub_phys(target_phys_addr_t addr)
3307{
3308 uint8_t val;
3309 cpu_physical_memory_read(addr, &val, 1);
3310 return val;
3311}
3312
3313/* XXX: optimize */
3314uint32_t lduw_phys(target_phys_addr_t addr)
3315{
3316 uint16_t val;
3317 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3318 return tswap16(val);
3319}
3320
8df1cd07
FB
3321/* warning: addr must be aligned. The ram page is not masked as dirty
3322 and the code inside is not invalidated. It is useful if the dirty
3323 bits are used to track modified PTEs */
3324void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3325{
3326 int io_index;
3327 uint8_t *ptr;
3328 unsigned long pd;
3329 PhysPageDesc *p;
3330
3331 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3332 if (!p) {
3333 pd = IO_MEM_UNASSIGNED;
3334 } else {
3335 pd = p->phys_offset;
3336 }
3b46e624 3337
3a7d929e 3338 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3339 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3340 if (p)
3341 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3342 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3343 } else {
74576198
AL
3344 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3345 ptr = phys_ram_base + addr1;
8df1cd07 3346 stl_p(ptr, val);
74576198
AL
3347
3348 if (unlikely(in_migration)) {
3349 if (!cpu_physical_memory_is_dirty(addr1)) {
3350 /* invalidate code */
3351 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3352 /* set dirty bit */
3353 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3354 (0xff & ~CODE_DIRTY_FLAG);
3355 }
3356 }
8df1cd07
FB
3357 }
3358}
3359
bc98a7ef
JM
3360void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3361{
3362 int io_index;
3363 uint8_t *ptr;
3364 unsigned long pd;
3365 PhysPageDesc *p;
3366
3367 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3368 if (!p) {
3369 pd = IO_MEM_UNASSIGNED;
3370 } else {
3371 pd = p->phys_offset;
3372 }
3b46e624 3373
bc98a7ef
JM
3374 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3375 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3376 if (p)
3377 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3378#ifdef TARGET_WORDS_BIGENDIAN
3379 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3380 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3381#else
3382 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3383 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3384#endif
3385 } else {
5fafdf24 3386 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3387 (addr & ~TARGET_PAGE_MASK);
3388 stq_p(ptr, val);
3389 }
3390}
3391
8df1cd07 3392/* warning: addr must be aligned */
8df1cd07
FB
3393void stl_phys(target_phys_addr_t addr, uint32_t val)
3394{
3395 int io_index;
3396 uint8_t *ptr;
3397 unsigned long pd;
3398 PhysPageDesc *p;
3399
3400 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3401 if (!p) {
3402 pd = IO_MEM_UNASSIGNED;
3403 } else {
3404 pd = p->phys_offset;
3405 }
3b46e624 3406
3a7d929e 3407 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3408 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3409 if (p)
3410 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3411 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3412 } else {
3413 unsigned long addr1;
3414 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3415 /* RAM case */
3416 ptr = phys_ram_base + addr1;
3417 stl_p(ptr, val);
3a7d929e
FB
3418 if (!cpu_physical_memory_is_dirty(addr1)) {
3419 /* invalidate code */
3420 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3421 /* set dirty bit */
f23db169
FB
3422 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3423 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3424 }
8df1cd07
FB
3425 }
3426}
3427
aab33094
FB
3428/* XXX: optimize */
3429void stb_phys(target_phys_addr_t addr, uint32_t val)
3430{
3431 uint8_t v = val;
3432 cpu_physical_memory_write(addr, &v, 1);
3433}
3434
3435/* XXX: optimize */
3436void stw_phys(target_phys_addr_t addr, uint32_t val)
3437{
3438 uint16_t v = tswap16(val);
3439 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3440}
3441
3442/* XXX: optimize */
3443void stq_phys(target_phys_addr_t addr, uint64_t val)
3444{
3445 val = tswap64(val);
3446 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3447}
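
/* Minimal usage sketch: target MMU code walks page tables with the
 * ldX_phys/stX_phys helpers; writing the bit back with stl_phys_notdirty()
 * avoids setting the page's dirty bits, which is what you want when those
 * bits are themselves used to track modified PTEs (see the comment above
 * stl_phys_notdirty).  The bit value and names are placeholders. */
static void hypothetical_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}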
3448
13eb76e0
FB
3449#endif
3450
3451/* virtual memory access for debug */
5fafdf24 3452int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3453 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3454{
3455 int l;
9b3c35e0
JM
3456 target_phys_addr_t phys_addr;
3457 target_ulong page;
13eb76e0
FB
3458
3459 while (len > 0) {
3460 page = addr & TARGET_PAGE_MASK;
3461 phys_addr = cpu_get_phys_page_debug(env, page);
3462 /* if no physical page mapped, return an error */
3463 if (phys_addr == -1)
3464 return -1;
3465 l = (page + TARGET_PAGE_SIZE) - addr;
3466 if (l > len)
3467 l = len;
5fafdf24 3468 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3469 buf, l, is_write);
13eb76e0
FB
3470 len -= l;
3471 buf += l;
3472 addr += l;
3473 }
3474 return 0;
3475}
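/* Editor's note: a hedged usage sketch.  cpu_memory_rw_debug() resolves
   guest-virtual addresses page by page via cpu_get_phys_page_debug(), so a
   debugger-style caller (e.g. the gdb stub) can read any virtual range
   without worrying about page crossings.  The wrapper below is
   illustrative, not part of exec.c. */
static int debug_read_guest_u32(CPUState *env, target_ulong vaddr,
                                uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;                /* some page in the range is unmapped */
    *out = ldl_p(buf);            /* decode in target byte order */
    return 0;
}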
3476
2e70f6ef
PB
3477/* in deterministic execution mode, instructions doing device I/Os
3478 must be at the end of the TB */
3479void cpu_io_recompile(CPUState *env, void *retaddr)
3480{
3481 TranslationBlock *tb;
3482 uint32_t n, cflags;
3483 target_ulong pc, cs_base;
3484 uint64_t flags;
3485
3486 tb = tb_find_pc((unsigned long)retaddr);
3487 if (!tb) {
3488 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3489 retaddr);
3490 }
3491 n = env->icount_decr.u16.low + tb->icount;
3492 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3493 /* Calculate how many instructions had been executed before the fault
bf20dc07 3494 occurred. */
2e70f6ef
PB
3495 n = n - env->icount_decr.u16.low;
3496 /* Generate a new TB ending on the I/O insn. */
3497 n++;
3498 /* On MIPS and SH, delay slot instructions can only be restarted if
3499 they were already the first instruction in the TB. If this is not
bf20dc07 3500 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3501 branch. */
3502#if defined(TARGET_MIPS)
3503 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3504 env->active_tc.PC -= 4;
3505 env->icount_decr.u16.low++;
3506 env->hflags &= ~MIPS_HFLAG_BMASK;
3507 }
3508#elif defined(TARGET_SH4)
3509 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3510 && n > 1) {
3511 env->pc -= 2;
3512 env->icount_decr.u16.low++;
3513 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3514 }
3515#endif
3516 /* This should never happen. */
3517 if (n > CF_COUNT_MASK)
3518 cpu_abort(env, "TB too big during recompile");
3519
3520 cflags = n | CF_LAST_IO;
3521 pc = tb->pc;
3522 cs_base = tb->cs_base;
3523 flags = tb->flags;
3524 tb_phys_invalidate(tb, -1);
3525 /* FIXME: In theory this could raise an exception. In practice
3526 we have already translated the block once so it's probably ok. */
3527 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3528 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3529 the first in the TB) then we end up generating a whole new TB and
3530 repeating the fault, which is horribly inefficient.
3531 Better would be to execute just this insn uncached, or generate a
3532 second new TB. */
3533 cpu_resume_from_signal(env, NULL);
3534}
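/* Editor's note: a simplified, hypothetical sketch of the caller side of
   cpu_io_recompile(), roughly what the softmmu I/O helpers do when icount
   is enabled.  use_icount and can_do_io() are assumed to be the global /
   inline helpers of this period; treat the check as illustrative only. */
static void check_io_allowed(CPUState *env, void *retaddr)
{
    /* In deterministic (icount) mode an I/O access is only legal as the
       last insn of its TB; otherwise regenerate the TB with CF_LAST_IO. */
    if (use_icount && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);   /* longjmps away, does not return */
    }
}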
3535
e3db7226
FB
3536void dump_exec_info(FILE *f,
3537 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3538{
3539 int i, target_code_size, max_target_code_size;
3540 int direct_jmp_count, direct_jmp2_count, cross_page;
3541 TranslationBlock *tb;
3b46e624 3542
e3db7226
FB
3543 target_code_size = 0;
3544 max_target_code_size = 0;
3545 cross_page = 0;
3546 direct_jmp_count = 0;
3547 direct_jmp2_count = 0;
3548 for(i = 0; i < nb_tbs; i++) {
3549 tb = &tbs[i];
3550 target_code_size += tb->size;
3551 if (tb->size > max_target_code_size)
3552 max_target_code_size = tb->size;
3553 if (tb->page_addr[1] != -1)
3554 cross_page++;
3555 if (tb->tb_next_offset[0] != 0xffff) {
3556 direct_jmp_count++;
3557 if (tb->tb_next_offset[1] != 0xffff) {
3558 direct_jmp2_count++;
3559 }
3560 }
3561 }
3562 /* XXX: avoid using doubles? */
57fec1fe 3563 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3564 cpu_fprintf(f, "gen code size %ld/%ld\n",
3565 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3566 cpu_fprintf(f, "TB count %d/%d\n",
3567 nb_tbs, code_gen_max_blocks);
5fafdf24 3568 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3569 nb_tbs ? target_code_size / nb_tbs : 0,
3570 max_target_code_size);
5fafdf24 3571 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3572 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3573 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3574 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3575 cross_page,
e3db7226
FB
3576 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3577 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3578 direct_jmp_count,
e3db7226
FB
3579 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3580 direct_jmp2_count,
3581 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3582 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3583 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3584 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3585 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3586 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3587}
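/* Editor's note: a minimal usage sketch.  Any fprintf-compatible callback
   can be passed; the monitor's "info jit" command feeds its own printer,
   but plain stderr works just as well for ad-hoc debugging. */
static void dump_jit_stats_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}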
3588
5fafdf24 3589#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3590
3591#define MMUSUFFIX _cmmu
3592#define GETPC() NULL
3593#define env cpu_single_env
b769d8fe 3594#define SOFTMMU_CODE_ACCESS
61382a50
FB
3595
3596#define SHIFT 0
3597#include "softmmu_template.h"
3598
3599#define SHIFT 1
3600#include "softmmu_template.h"
3601
3602#define SHIFT 2
3603#include "softmmu_template.h"
3604
3605#define SHIFT 3
3606#include "softmmu_template.h"
3607
3608#undef env
3609
3610#endif
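/* Editor's note on the template inclusions above, added for orientation.
   With MMUSUFFIX set to _cmmu and SOFTMMU_CODE_ACCESS defined, each SHIFT
   value instantiates one code-fetch helper per access size, roughly:

       __ldb_cmmu(addr, mmu_idx)    SHIFT 0, 8-bit
       __ldw_cmmu(addr, mmu_idx)    SHIFT 1, 16-bit
       __ldl_cmmu(addr, mmu_idx)    SHIFT 2, 32-bit
       __ldq_cmmu(addr, mmu_idx)    SHIFT 3, 64-bit

   The exact names and prototypes come from softmmu_template.h and are
   reproduced here from memory, so treat them as approximate. */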