/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
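
/* Illustrative note, not part of the original source: with L2_BITS = 10 and
   a 4 KB target page (TARGET_PAGE_BITS = 12) on a 32-bit guest, L1_BITS is
   32 - 10 - 12 = 10.  A virtual page index then splits as
       l1_map[index >> L2_BITS]            -- 1024 L1 slots
       l2_table[index & (L2_SIZE - 1)]     -- 1024 PageDesc per L2 table
   so each lazily allocated L2 table covers 4 MB of guest address space. */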

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
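
/* Aside, not in the original source: the /proc/self/maps walk above marks
   every region already mapped by the host process as PAGE_RESERVED; the
   intent, as far as the code shows, is that user-mode emulation will not
   later hand those host addresses out to the guest. */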

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
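
/* Aside, not in the original source: freshly allocated L2 entries are
   initialized to IO_MEM_UNASSIGNED rather than zero, so a physical page
   that was never registered reports as unassigned memory on access
   instead of silently aliasing RAM offset 0. */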

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 64M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
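
/* Aside, not in the original source: code_gen_buffer_max_size keeps one
   maximum-sized block in reserve, so a translation that starts just under
   the threshold can still finish inside the buffer, while the tbs[] array
   is sized from the average expected block size (CODE_GEN_AVG_BLOCK_SIZE);
   both limits are checked again in tb_alloc(). */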

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
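
/* Note on the pointer tagging used below, not part of the original source:
   the low two bits of the TranslationBlock pointers stored in page_next[],
   jmp_next[] and jmp_first encode which slot of the linked TB the link
   belongs to.  Tags 0 and 1 select page/jump slot 0 or 1; tag 2 marks the
   list head (jmp_first points back at the TB itself with tag 2).  Hence
   the recurring pattern:
       n1  = (long)tb1 & 3;                          // extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // strip it
*/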

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
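
/* Worked example, not in the original source: set_bits(tab, 5, 7) marks
   bits [5, 12).  start and end fall in different bytes, so the first byte
   gets mask 0xff << 5 = 0xe0 (bits 5..7), no full 0xff byte follows, and
   the last byte gets ~(0xff << (12 & 7)) = 0x0f (bits 8..11). */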

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
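
/* Aside, not in the original source: a TB may straddle a target page
   boundary, which is why tb_gen_code probes the page containing the TB's
   last byte (virt_page2) and passes a second physical page to
   tb_link_phys; self-modifying-code invalidation must be able to reach
   the TB from either page's first_tb list. */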

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
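
/* Aside, not in the original source: the fast path works because
   build_page_bitmap records one bit per byte offset of translated code in
   the page.  Under the documented constraints (len <= 8 and start a
   multiple of len, i.e. aligned 1/2/4/8-byte writes), the len bits of
   interest always fall within a single bitmap byte, so one shift and mask
   decide whether translated code is being overwritten. */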

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
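
/* Aside, not in the original source: the binary search is valid because
   TBs are carved out of code_gen_buffer in strictly increasing tc_ptr
   order between flushes, so tbs[0..nb_tbs-1] is sorted by tc_ptr.  When
   there is no exact match, the loop exits with tbs[m_max] being the last
   TB starting at or below tc_ptr, i.e. the block containing that host
   PC. */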

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));
    if (!wp)
        return -ENOMEM;

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
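
/* Example of the sanity check above, not in the original source: for
   len = 4, len_mask = ~3, so addr & ~len_mask == addr & 3 must be zero,
   i.e. a 4-byte watchpoint must be 4-byte aligned; len = 3 is rejected
   outright because it is not a power of two. */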

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));
    if (!bp)
        return -ENOMEM;

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
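
/* Usage note, not in the original source: this parser backs the -d
   command line option, so e.g. "in_asm,op" yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" sets every defined mask, and
   an unknown name makes the call return 0 so the caller can reject the
   argument. */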

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
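
/* Aside, not in the original source: two bucket ranges are cleared
   because the jump cache is hashed by TB start address; a TB that starts
   on the preceding page may extend into the flushed page, so the bucket
   range for (addr - TARGET_PAGE_SIZE) has to be discarded as well. */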

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
274da6b2 1724static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1725{
5fafdf24 1726 if (addr == (tlb_entry->addr_read &
84b7b8e7 1727 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1728 addr == (tlb_entry->addr_write &
84b7b8e7 1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1730 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1732 tlb_entry->addr_read = -1;
1733 tlb_entry->addr_write = -1;
1734 tlb_entry->addr_code = -1;
1735 }
61382a50
FB
1736}
1737
2e12669a 1738void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1739{
8a40a180 1740 int i;
0124311e 1741
9fa3e853 1742#if defined(DEBUG_TLB)
108c49b8 1743 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1744#endif
0124311e
FB
1745 /* must reset current TB so that interrupts cannot modify the
1746 links while we are modifying them */
1747 env->current_tb = NULL;
61382a50
FB
1748
1749 addr &= TARGET_PAGE_MASK;
1750 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1751 tlb_flush_entry(&env->tlb_table[0][i], addr);
1752 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1753#if (NB_MMU_MODES >= 3)
1754 tlb_flush_entry(&env->tlb_table[2][i], addr);
1755#if (NB_MMU_MODES == 4)
1756 tlb_flush_entry(&env->tlb_table[3][i], addr);
1757#endif
1758#endif
0124311e 1759
5c751e99 1760 tlb_flush_jmp_cache(env, addr);
9fa3e853 1761
0a962c02
FB
1762#ifdef USE_KQEMU
1763 if (env->kqemu_enabled) {
1764 kqemu_flush_page(env, addr);
1765 }
1766#endif
9fa3e853
FB
1767}
1768
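/* Illustrative sketch, not part of the original file: how target MMU code
   typically chooses between the two flush primitives above. The helper name
   and trigger conditions are assumptions for illustration only. */
#if 0
static void example_mmu_update(CPUState *env, target_ulong vaddr,
                               int global_change)
{
    if (global_change) {
        /* e.g. the guest reloaded its page-table base: drop everything */
        tlb_flush(env, 1);
    } else {
        /* a single PTE changed: drop only that page's cached entries */
        tlb_flush_page(env, vaddr);
    }
}
#endif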
9fa3e853
FB
1769/* update the TLBs so that writes to code in the virtual page 'addr'
1770 can be detected */
6a00d601 1771static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1772{
5fafdf24 1773 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1774 ram_addr + TARGET_PAGE_SIZE,
1775 CODE_DIRTY_FLAG);
9fa3e853
FB
1776}
1777
9fa3e853 1778/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1779 tested for self-modifying code */
5fafdf24 1780static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1781 target_ulong vaddr)
9fa3e853 1782{
3a7d929e 1783 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1784}
1785
5fafdf24 1786static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1787 unsigned long start, unsigned long length)
1788{
1789 unsigned long addr;
84b7b8e7
FB
1790 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1791 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1792 if ((addr - start) < length) {
0f459d16 1793 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1794 }
1795 }
1796}
1797
3a7d929e 1798void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1799 int dirty_flags)
1ccde1cb
FB
1800{
1801 CPUState *env;
4f2ac237 1802 unsigned long length, start1;
0a962c02
FB
1803 int i, mask, len;
1804 uint8_t *p;
1ccde1cb
FB
1805
1806 start &= TARGET_PAGE_MASK;
1807 end = TARGET_PAGE_ALIGN(end);
1808
1809 length = end - start;
1810 if (length == 0)
1811 return;
0a962c02 1812 len = length >> TARGET_PAGE_BITS;
3a7d929e 1813#ifdef USE_KQEMU
6a00d601
FB
1814 /* XXX: should not depend on cpu context */
1815 env = first_cpu;
3a7d929e 1816 if (env->kqemu_enabled) {
f23db169
FB
1817 ram_addr_t addr;
1818 addr = start;
1819 for(i = 0; i < len; i++) {
1820 kqemu_set_notdirty(env, addr);
1821 addr += TARGET_PAGE_SIZE;
1822 }
3a7d929e
FB
1823 }
1824#endif
f23db169
FB
1825 mask = ~dirty_flags;
1826 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1827 for(i = 0; i < len; i++)
1828 p[i] &= mask;
1829
1ccde1cb
FB
1830 /* we modify the TLB cache so that the dirty bit will be set again
1831 when accessing the range */
59817ccb 1832 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1833 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1834 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1835 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1836 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1837 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1838#if (NB_MMU_MODES >= 3)
1839 for(i = 0; i < CPU_TLB_SIZE; i++)
1840 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1841#if (NB_MMU_MODES == 4)
1842 for(i = 0; i < CPU_TLB_SIZE; i++)
1843 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1844#endif
1845#endif
6a00d601 1846 }
1ccde1cb
FB
1847}
1848
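/* Illustrative consumer sketch (assumption: a display device using
   VGA_DIRTY_FLAG, which cpu-all.h reserves for this purpose): scan the
   framebuffer pages, redraw the dirty ones, then reset the flag so the
   not-dirty TLB entries re-arm and catch the next guest write. */
#if 0
static void example_display_refresh(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}
#endif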
74576198
AL
1849int cpu_physical_memory_set_dirty_tracking(int enable)
1850{
1851 in_migration = enable;
1852 return 0;
1853}
1854
1855int cpu_physical_memory_get_dirty_tracking(void)
1856{
1857 return in_migration;
1858}
1859
2bec46dc
AL
1860void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1861{
1862 if (kvm_enabled())
1863 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1864}
1865
3a7d929e
FB
1866static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1867{
1868 ram_addr_t ram_addr;
1869
84b7b8e7 1870 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1871 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1872 tlb_entry->addend - (unsigned long)phys_ram_base;
1873 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1874 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1875 }
1876 }
1877}
1878
1879/* update the TLB according to the current state of the dirty bits */
1880void cpu_tlb_update_dirty(CPUState *env)
1881{
1882 int i;
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1884 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1885 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1886 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1887#if (NB_MMU_MODES >= 3)
1888 for(i = 0; i < CPU_TLB_SIZE; i++)
1889 tlb_update_dirty(&env->tlb_table[2][i]);
1890#if (NB_MMU_MODES == 4)
1891 for(i = 0; i < CPU_TLB_SIZE; i++)
1892 tlb_update_dirty(&env->tlb_table[3][i]);
1893#endif
1894#endif
3a7d929e
FB
1895}
1896
0f459d16 1897static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1898{
0f459d16
PB
1899 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1900 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1901}
1902
0f459d16
PB
1903/* update the TLB corresponding to virtual page vaddr
1904 so that it is no longer dirty */
1905static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1906{
1ccde1cb
FB
1907 int i;
1908
0f459d16 1909 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1910 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1911 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1912 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1913#if (NB_MMU_MODES >= 3)
0f459d16 1914 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1915#if (NB_MMU_MODES == 4)
0f459d16 1916 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1917#endif
1918#endif
9fa3e853
FB
1919}
1920
59817ccb
FB
1921/* add a new TLB entry. At most one entry for a given virtual address
1922 is permitted. Return 0 if OK or 2 if the page could not be mapped
1923 (can only happen in non SOFTMMU mode for I/O pages or pages
1924 conflicting with the host address space). */
5fafdf24
TS
1925int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1926 target_phys_addr_t paddr, int prot,
6ebbf390 1927 int mmu_idx, int is_softmmu)
9fa3e853 1928{
92e873b9 1929 PhysPageDesc *p;
4f2ac237 1930 unsigned long pd;
9fa3e853 1931 unsigned int index;
4f2ac237 1932 target_ulong address;
0f459d16 1933 target_ulong code_address;
108c49b8 1934 target_phys_addr_t addend;
9fa3e853 1935 int ret;
84b7b8e7 1936 CPUTLBEntry *te;
a1d1bb31 1937 CPUWatchpoint *wp;
0f459d16 1938 target_phys_addr_t iotlb;
9fa3e853 1939
92e873b9 1940 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1941 if (!p) {
1942 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1943 } else {
1944 pd = p->phys_offset;
9fa3e853
FB
1945 }
1946#if defined(DEBUG_TLB)
6ebbf390
JM
1947 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1948 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1949#endif
1950
1951 ret = 0;
0f459d16
PB
1952 address = vaddr;
1953 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1954 /* IO memory case (romd handled later) */
1955 address |= TLB_MMIO;
1956 }
1957 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1958 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1959 /* Normal RAM. */
1960 iotlb = pd & TARGET_PAGE_MASK;
1961 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1962 iotlb |= IO_MEM_NOTDIRTY;
1963 else
1964 iotlb |= IO_MEM_ROM;
1965 } else {
 1966 /* IO handlers are currently passed a physical address.
1967 It would be nice to pass an offset from the base address
 1968 of that region. This would avoid having to special-case RAM,
1969 and avoid full address decoding in every device.
1970 We can't use the high bits of pd for this because
1971 IO_MEM_ROMD uses these as a ram address. */
1972 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1973 }
1974
1975 code_address = address;
1976 /* Make accesses to pages with watchpoints go via the
1977 watchpoint trap routines. */
c0ce998e 1978 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 1979 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
1980 iotlb = io_mem_watch + paddr;
1981 /* TODO: The memory case can be optimized by not trapping
1982 reads of pages with a write breakpoint. */
1983 address |= TLB_MMIO;
6658ffb8 1984 }
0f459d16 1985 }
d79acba4 1986
0f459d16
PB
1987 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1988 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1989 te = &env->tlb_table[mmu_idx][index];
1990 te->addend = addend - vaddr;
1991 if (prot & PAGE_READ) {
1992 te->addr_read = address;
1993 } else {
1994 te->addr_read = -1;
1995 }
5c751e99 1996
0f459d16
PB
1997 if (prot & PAGE_EXEC) {
1998 te->addr_code = code_address;
1999 } else {
2000 te->addr_code = -1;
2001 }
2002 if (prot & PAGE_WRITE) {
2003 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2004 (pd & IO_MEM_ROMD)) {
2005 /* Write access calls the I/O callback. */
2006 te->addr_write = address | TLB_MMIO;
2007 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2008 !cpu_physical_memory_is_dirty(pd)) {
2009 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2010 } else {
0f459d16 2011 te->addr_write = address;
9fa3e853 2012 }
0f459d16
PB
2013 } else {
2014 te->addr_write = -1;
9fa3e853 2015 }
9fa3e853
FB
2016 return ret;
2017}
2018
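/* Illustrative sketch (the walker and fault handling are hypothetical): a
   target's MMU fault handler resolves the guest virtual address against the
   guest page tables and then installs the translation with
   tlb_set_page_exec(). */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx)
{
    target_phys_addr_t paddr = 0;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* ... walk the guest page tables; on failure raise a guest fault,
       on success fill in paddr and narrow prot ... */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);
}
#endif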
0124311e
FB
2019#else
2020
ee8b7021 2021void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2022{
2023}
2024
2e12669a 2025void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2026{
2027}
2028
5fafdf24
TS
2029int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2030 target_phys_addr_t paddr, int prot,
6ebbf390 2031 int mmu_idx, int is_softmmu)
9fa3e853
FB
2032{
2033 return 0;
2034}
0124311e 2035
9fa3e853
FB
2036/* dump memory mappings */
2037void page_dump(FILE *f)
33417e70 2038{
9fa3e853
FB
2039 unsigned long start, end;
2040 int i, j, prot, prot1;
2041 PageDesc *p;
33417e70 2042
9fa3e853
FB
2043 fprintf(f, "%-8s %-8s %-8s %s\n",
2044 "start", "end", "size", "prot");
2045 start = -1;
2046 end = -1;
2047 prot = 0;
2048 for(i = 0; i <= L1_SIZE; i++) {
2049 if (i < L1_SIZE)
2050 p = l1_map[i];
2051 else
2052 p = NULL;
2053 for(j = 0;j < L2_SIZE; j++) {
2054 if (!p)
2055 prot1 = 0;
2056 else
2057 prot1 = p[j].flags;
2058 if (prot1 != prot) {
2059 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2060 if (start != -1) {
2061 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2062 start, end, end - start,
9fa3e853
FB
2063 prot & PAGE_READ ? 'r' : '-',
2064 prot & PAGE_WRITE ? 'w' : '-',
2065 prot & PAGE_EXEC ? 'x' : '-');
2066 }
2067 if (prot1 != 0)
2068 start = end;
2069 else
2070 start = -1;
2071 prot = prot1;
2072 }
2073 if (!p)
2074 break;
2075 }
33417e70 2076 }
33417e70
FB
2077}
2078
53a5960a 2079int page_get_flags(target_ulong address)
33417e70 2080{
9fa3e853
FB
2081 PageDesc *p;
2082
2083 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2084 if (!p)
9fa3e853
FB
2085 return 0;
2086 return p->flags;
2087}
2088
2089/* modify the flags of a page and invalidate the code if
 2090 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2091 depending on PAGE_WRITE */
53a5960a 2092void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2093{
2094 PageDesc *p;
53a5960a 2095 target_ulong addr;
9fa3e853 2096
c8a706fe 2097 /* mmap_lock should already be held. */
9fa3e853
FB
2098 start = start & TARGET_PAGE_MASK;
2099 end = TARGET_PAGE_ALIGN(end);
2100 if (flags & PAGE_WRITE)
2101 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2102 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2103 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2104 /* We may be called for host regions that are outside guest
2105 address space. */
2106 if (!p)
2107 return;
9fa3e853
FB
2108 /* if the write protection is set, then we invalidate the code
2109 inside */
5fafdf24 2110 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2111 (flags & PAGE_WRITE) &&
2112 p->first_tb) {
d720b93d 2113 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2114 }
2115 p->flags = flags;
2116 }
33417e70
FB
2117}
2118
3d97b40b
TS
2119int page_check_range(target_ulong start, target_ulong len, int flags)
2120{
2121 PageDesc *p;
2122 target_ulong end;
2123 target_ulong addr;
2124
55f280c9
AZ
2125 if (start + len < start)
2126 /* we've wrapped around */
2127 return -1;
2128
3d97b40b
TS
2129 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2130 start = start & TARGET_PAGE_MASK;
2131
3d97b40b
TS
2132 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2133 p = page_find(addr >> TARGET_PAGE_BITS);
 2132 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
 2133 p = page_find(addr >> TARGET_PAGE_BITS);
 2134 if (!p)
 2135 return -1;
 2136 if (!(p->flags & PAGE_VALID))
 2137 return -1;
2138
dae3270c 2139 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2140 return -1;
dae3270c
FB
2141 if (flags & PAGE_WRITE) {
2142 if (!(p->flags & PAGE_WRITE_ORG))
2143 return -1;
2144 /* unprotect the page if it was put read-only because it
2145 contains translated code */
2146 if (!(p->flags & PAGE_WRITE)) {
2147 if (!page_unprotect(addr, 0, NULL))
2148 return -1;
2149 }
2150 return 0;
2151 }
3d97b40b
TS
2152 }
2153 return 0;
2154}
2155
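/* Illustrative sketch (hypothetical syscall helper): the user-mode syscall
   layer validates a guest buffer with page_check_range() before touching it,
   so a bad guest pointer becomes -EFAULT instead of a host SIGSEGV; g2h() is
   the guest-to-host address conversion used elsewhere in this file. */
#if 0
static int example_copy_from_guest(void *dst, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1; /* caller turns this into -EFAULT */
    memcpy(dst, (void *)g2h(guest_addr), len);
    return 0;
}
#endif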
9fa3e853
FB
2156/* called from signal handler: invalidate the code and unprotect the
 2157 page. Return TRUE if the fault was successfully handled. */
53a5960a 2158int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2159{
2160 unsigned int page_index, prot, pindex;
2161 PageDesc *p, *p1;
53a5960a 2162 target_ulong host_start, host_end, addr;
9fa3e853 2163
c8a706fe
PB
2164 /* Technically this isn't safe inside a signal handler. However we
2165 know this only ever happens in a synchronous SEGV handler, so in
2166 practice it seems to be ok. */
2167 mmap_lock();
2168
83fb7adf 2169 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2170 page_index = host_start >> TARGET_PAGE_BITS;
2171 p1 = page_find(page_index);
c8a706fe
PB
2172 if (!p1) {
2173 mmap_unlock();
9fa3e853 2174 return 0;
c8a706fe 2175 }
83fb7adf 2176 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2177 p = p1;
2178 prot = 0;
2179 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2180 prot |= p->flags;
2181 p++;
2182 }
2183 /* if the page was really writable, then we change its
2184 protection back to writable */
2185 if (prot & PAGE_WRITE_ORG) {
2186 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2187 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2188 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2189 (prot & PAGE_BITS) | PAGE_WRITE);
2190 p1[pindex].flags |= PAGE_WRITE;
2191 /* and since the content will be modified, we must invalidate
2192 the corresponding translated code. */
d720b93d 2193 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2194#ifdef DEBUG_TB_CHECK
2195 tb_invalidate_check(address);
2196#endif
c8a706fe 2197 mmap_unlock();
9fa3e853
FB
2198 return 1;
2199 }
2200 }
c8a706fe 2201 mmap_unlock();
9fa3e853
FB
2202 return 0;
2203}
2204
6a00d601
FB
2205static inline void tlb_set_dirty(CPUState *env,
2206 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2207{
2208}
9fa3e853
FB
2209#endif /* defined(CONFIG_USER_ONLY) */
2210
e2eef170 2211#if !defined(CONFIG_USER_ONLY)
db7b5426 2212static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2213 ram_addr_t memory);
2214static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2215 ram_addr_t orig_memory);
db7b5426
BS
2216#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2217 need_subpage) \
2218 do { \
2219 if (addr > start_addr) \
2220 start_addr2 = 0; \
2221 else { \
2222 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2223 if (start_addr2 > 0) \
2224 need_subpage = 1; \
2225 } \
2226 \
49e9fba2 2227 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2228 end_addr2 = TARGET_PAGE_SIZE - 1; \
2229 else { \
2230 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2231 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2232 need_subpage = 1; \
2233 } \
2234 } while (0)
2235
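/* Worked example of the macro above, assuming TARGET_PAGE_SIZE == 0x1000:
   registering orig_size = 0x100 bytes at start_addr = 0x10000040 visits the
   single page at addr = start_addr. Then addr > start_addr is false, so
   start_addr2 = 0x040 (> 0, need_subpage = 1), and since 0x100 < 0x1000,
   end_addr2 = (0x10000140 - 1) & 0xfff = 0x13f (< 0xfff, need_subpage = 1):
   the region occupies offsets 0x040..0x13f of that page and must be handled
   by the subpage machinery below. */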
33417e70
FB
2236/* register physical memory. 'size' must be a multiple of the target
2237 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2238 io memory page */
5fafdf24 2239void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2240 ram_addr_t size,
2241 ram_addr_t phys_offset)
33417e70 2242{
108c49b8 2243 target_phys_addr_t addr, end_addr;
92e873b9 2244 PhysPageDesc *p;
9d42037b 2245 CPUState *env;
00f82b8a 2246 ram_addr_t orig_size = size;
db7b5426 2247 void *subpage;
33417e70 2248
da260249
FB
2249#ifdef USE_KQEMU
2250 /* XXX: should not depend on cpu context */
2251 env = first_cpu;
2252 if (env->kqemu_enabled) {
2253 kqemu_set_phys_mem(start_addr, size, phys_offset);
2254 }
2255#endif
7ba1e619
AL
2256 if (kvm_enabled())
2257 kvm_set_phys_mem(start_addr, size, phys_offset);
2258
5fd386f6 2259 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2260 end_addr = start_addr + (target_phys_addr_t)size;
2261 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2262 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2263 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2264 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2265 target_phys_addr_t start_addr2, end_addr2;
2266 int need_subpage = 0;
2267
2268 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2269 need_subpage);
4254fab8 2270 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2271 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2272 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2273 &p->phys_offset, orig_memory);
2274 } else {
2275 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2276 >> IO_MEM_SHIFT];
2277 }
2278 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2279 } else {
2280 p->phys_offset = phys_offset;
2281 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2282 (phys_offset & IO_MEM_ROMD))
2283 phys_offset += TARGET_PAGE_SIZE;
2284 }
2285 } else {
2286 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2287 p->phys_offset = phys_offset;
2288 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2289 (phys_offset & IO_MEM_ROMD))
2290 phys_offset += TARGET_PAGE_SIZE;
2291 else {
2292 target_phys_addr_t start_addr2, end_addr2;
2293 int need_subpage = 0;
2294
2295 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2296 end_addr2, need_subpage);
2297
4254fab8 2298 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2299 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2300 &p->phys_offset, IO_MEM_UNASSIGNED);
2301 subpage_register(subpage, start_addr2, end_addr2,
2302 phys_offset);
2303 }
2304 }
2305 }
33417e70 2306 }
3b46e624 2307
9d42037b
FB
2308 /* since each CPU stores ram addresses in its TLB cache, we must
2309 reset the modified entries */
2310 /* XXX: slow ! */
2311 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2312 tlb_flush(env, 1);
2313 }
33417e70
FB
2314}
2315
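/* Illustrative usage sketch (the addresses and sizes are made up): a board
   model allocates backing storage with qemu_ram_alloc() (defined below) and
   maps it; the low bits of phys_offset carry the region type, so plain RAM
   is tagged IO_MEM_RAM and a ROM gets IO_MEM_ROM, which makes guest writes
   go through the I/O path and be discarded. */
#if 0
static void example_board_map(void)
{
    ram_addr_t ram_off = qemu_ram_alloc(0x800000);  /* 8 MB guest RAM */
    ram_addr_t rom_off = qemu_ram_alloc(0x10000);   /* 64 KB boot ROM */

    cpu_register_physical_memory(0x00000000, 0x800000, ram_off | IO_MEM_RAM);
    cpu_register_physical_memory(0xfff00000, 0x10000, rom_off | IO_MEM_ROM);
}
#endif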
ba863458 2316/* XXX: temporary until new memory mapping API */
00f82b8a 2317ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2318{
2319 PhysPageDesc *p;
2320
2321 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2322 if (!p)
2323 return IO_MEM_UNASSIGNED;
2324 return p->phys_offset;
2325}
2326
e9a1ab19 2327/* XXX: better than nothing */
00f82b8a 2328ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2329{
2330 ram_addr_t addr;
7fb4fdcf 2331 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2332 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2333 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2334 abort();
2335 }
2336 addr = phys_ram_alloc_offset;
2337 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2338 return addr;
2339}
2340
2341void qemu_ram_free(ram_addr_t addr)
2342{
2343}
2344
a4193c8a 2345static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2346{
67d3b957 2347#ifdef DEBUG_UNASSIGNED
ab3d1727 2348 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2349#endif
e18231a3
BS
2350#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr, 0, 0, 0, 1);
2352#endif
2353 return 0;
2354}
2355
2356static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2357{
2358#ifdef DEBUG_UNASSIGNED
2359 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2360#endif
2361#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2362 do_unassigned_access(addr, 0, 0, 0, 2);
2363#endif
2364 return 0;
2365}
2366
2367static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2368{
2369#ifdef DEBUG_UNASSIGNED
2370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2371#endif
2372#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2374#endif
33417e70
FB
2375 return 0;
2376}
2377
a4193c8a 2378static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2379{
67d3b957 2380#ifdef DEBUG_UNASSIGNED
ab3d1727 2381 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2382#endif
e18231a3
BS
2383#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr, 1, 0, 0, 1);
2385#endif
2386}
2387
2388static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2389{
2390#ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2392#endif
2393#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr, 1, 0, 0, 2);
2395#endif
2396}
2397
2398static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2399{
2400#ifdef DEBUG_UNASSIGNED
2401 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2402#endif
2403#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2404 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2405#endif
33417e70
FB
2406}
2407
2408static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2409 unassigned_mem_readb,
e18231a3
BS
2410 unassigned_mem_readw,
2411 unassigned_mem_readl,
33417e70
FB
2412};
2413
2414static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2415 unassigned_mem_writeb,
e18231a3
BS
2416 unassigned_mem_writew,
2417 unassigned_mem_writel,
33417e70
FB
2418};
2419
0f459d16
PB
2420static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2421 uint32_t val)
9fa3e853 2422{
3a7d929e 2423 int dirty_flags;
3a7d929e
FB
2424 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2425 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2426#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2427 tb_invalidate_phys_page_fast(ram_addr, 1);
2428 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2429#endif
3a7d929e 2430 }
0f459d16 2431 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2432#ifdef USE_KQEMU
2433 if (cpu_single_env->kqemu_enabled &&
2434 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2435 kqemu_modify_page(cpu_single_env, ram_addr);
2436#endif
f23db169
FB
2437 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2438 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2439 /* we remove the notdirty callback only if the code has been
2440 flushed */
2441 if (dirty_flags == 0xff)
2e70f6ef 2442 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2443}
2444
0f459d16
PB
2445static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2446 uint32_t val)
9fa3e853 2447{
3a7d929e 2448 int dirty_flags;
3a7d929e
FB
2449 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2450 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2451#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2452 tb_invalidate_phys_page_fast(ram_addr, 2);
2453 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2454#endif
3a7d929e 2455 }
0f459d16 2456 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2457#ifdef USE_KQEMU
2458 if (cpu_single_env->kqemu_enabled &&
2459 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2460 kqemu_modify_page(cpu_single_env, ram_addr);
2461#endif
f23db169
FB
2462 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2463 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2464 /* we remove the notdirty callback only if the code has been
2465 flushed */
2466 if (dirty_flags == 0xff)
2e70f6ef 2467 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2468}
2469
0f459d16
PB
2470static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2471 uint32_t val)
9fa3e853 2472{
3a7d929e 2473 int dirty_flags;
3a7d929e
FB
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2476#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2477 tb_invalidate_phys_page_fast(ram_addr, 4);
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2479#endif
3a7d929e 2480 }
0f459d16 2481 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2482#ifdef USE_KQEMU
2483 if (cpu_single_env->kqemu_enabled &&
2484 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2485 kqemu_modify_page(cpu_single_env, ram_addr);
2486#endif
f23db169
FB
2487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2488 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2489 /* we remove the notdirty callback only if the code has been
2490 flushed */
2491 if (dirty_flags == 0xff)
2e70f6ef 2492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2493}
2494
3a7d929e 2495static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2496 NULL, /* never used */
2497 NULL, /* never used */
2498 NULL, /* never used */
2499};
2500
1ccde1cb
FB
2501static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2502 notdirty_mem_writeb,
2503 notdirty_mem_writew,
2504 notdirty_mem_writel,
2505};
2506
0f459d16 2507/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2508static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2509{
2510 CPUState *env = cpu_single_env;
06d55cc1
AL
2511 target_ulong pc, cs_base;
2512 TranslationBlock *tb;
0f459d16 2513 target_ulong vaddr;
a1d1bb31 2514 CPUWatchpoint *wp;
06d55cc1 2515 int cpu_flags;
0f459d16 2516
06d55cc1
AL
2517 if (env->watchpoint_hit) {
2518 /* We re-entered the check after replacing the TB. Now raise
 2519 * the debug interrupt so that it will trigger after the
2520 * current instruction. */
2521 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2522 return;
2523 }
2e70f6ef 2524 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2525 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2526 if ((vaddr == (wp->vaddr & len_mask) ||
2527 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2528 wp->flags |= BP_WATCHPOINT_HIT;
2529 if (!env->watchpoint_hit) {
2530 env->watchpoint_hit = wp;
2531 tb = tb_find_pc(env->mem_io_pc);
2532 if (!tb) {
2533 cpu_abort(env, "check_watchpoint: could not find TB for "
2534 "pc=%p", (void *)env->mem_io_pc);
2535 }
2536 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2537 tb_phys_invalidate(tb, -1);
2538 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2539 env->exception_index = EXCP_DEBUG;
2540 } else {
2541 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2542 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2543 }
2544 cpu_resume_from_signal(env, NULL);
06d55cc1 2545 }
6e140f28
AL
2546 } else {
2547 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2548 }
2549 }
2550}
2551
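/* Illustrative sketch (assumption: the cpu_watchpoint_insert() API declared
   in cpu-all.h of this tree; the flag is the one tested by check_watchpoint()
   above): a debug front end arms a write watchpoint, after which
   tlb_set_page_exec() maps the page through io_mem_watch and the routines
   below call check_watchpoint() on every access. */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, vaddr, 4 /* bytes */, BP_MEM_WRITE, &wp);
}
#endif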
6658ffb8
PB
2552/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2553 so these check for a hit then pass through to the normal out-of-line
2554 phys routines. */
2555static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2556{
b4051334 2557 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2558 return ldub_phys(addr);
2559}
2560
2561static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2562{
b4051334 2563 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2564 return lduw_phys(addr);
2565}
2566
2567static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2568{
b4051334 2569 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2570 return ldl_phys(addr);
2571}
2572
6658ffb8
PB
2573static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2574 uint32_t val)
2575{
b4051334 2576 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2577 stb_phys(addr, val);
2578}
2579
2580static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2581 uint32_t val)
2582{
b4051334 2583 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2584 stw_phys(addr, val);
2585}
2586
2587static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2588 uint32_t val)
2589{
b4051334 2590 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2591 stl_phys(addr, val);
2592}
2593
2594static CPUReadMemoryFunc *watch_mem_read[3] = {
2595 watch_mem_readb,
2596 watch_mem_readw,
2597 watch_mem_readl,
2598};
2599
2600static CPUWriteMemoryFunc *watch_mem_write[3] = {
2601 watch_mem_writeb,
2602 watch_mem_writew,
2603 watch_mem_writel,
2604};
6658ffb8 2605
db7b5426
BS
2606static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2607 unsigned int len)
2608{
db7b5426
BS
2609 uint32_t ret;
2610 unsigned int idx;
2611
2612 idx = SUBPAGE_IDX(addr - mmio->base);
2613#if defined(DEBUG_SUBPAGE)
2614 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2615 mmio, len, addr, idx);
2616#endif
3ee89922 2617 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2618
2619 return ret;
2620}
2621
2622static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2623 uint32_t value, unsigned int len)
2624{
db7b5426
BS
2625 unsigned int idx;
2626
2627 idx = SUBPAGE_IDX(addr - mmio->base);
2628#if defined(DEBUG_SUBPAGE)
2629 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2630 mmio, len, addr, idx, value);
2631#endif
3ee89922 2632 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2633}
2634
2635static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2636{
2637#if defined(DEBUG_SUBPAGE)
2638 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2639#endif
2640
2641 return subpage_readlen(opaque, addr, 0);
2642}
2643
2644static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2645 uint32_t value)
2646{
2647#if defined(DEBUG_SUBPAGE)
2648 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2649#endif
2650 subpage_writelen(opaque, addr, value, 0);
2651}
2652
2653static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2654{
2655#if defined(DEBUG_SUBPAGE)
2656 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2657#endif
2658
2659 return subpage_readlen(opaque, addr, 1);
2660}
2661
2662static void subpage_writew (void *opaque, target_phys_addr_t addr,
2663 uint32_t value)
2664{
2665#if defined(DEBUG_SUBPAGE)
2666 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2667#endif
2668 subpage_writelen(opaque, addr, value, 1);
2669}
2670
2671static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2672{
2673#if defined(DEBUG_SUBPAGE)
2674 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2675#endif
2676
2677 return subpage_readlen(opaque, addr, 2);
2678}
2679
2680static void subpage_writel (void *opaque,
2681 target_phys_addr_t addr, uint32_t value)
2682{
2683#if defined(DEBUG_SUBPAGE)
2684 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2685#endif
2686 subpage_writelen(opaque, addr, value, 2);
2687}
2688
2689static CPUReadMemoryFunc *subpage_read[] = {
2690 &subpage_readb,
2691 &subpage_readw,
2692 &subpage_readl,
2693};
2694
2695static CPUWriteMemoryFunc *subpage_write[] = {
2696 &subpage_writeb,
2697 &subpage_writew,
2698 &subpage_writel,
2699};
2700
2701static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2702 ram_addr_t memory)
db7b5426
BS
2703{
2704 int idx, eidx;
4254fab8 2705 unsigned int i;
db7b5426
BS
2706
2707 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2708 return -1;
2709 idx = SUBPAGE_IDX(start);
2710 eidx = SUBPAGE_IDX(end);
2711#if defined(DEBUG_SUBPAGE)
2712 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2713 mmio, start, end, idx, eidx, memory);
2714#endif
2715 memory >>= IO_MEM_SHIFT;
2716 for (; idx <= eidx; idx++) {
4254fab8 2717 for (i = 0; i < 4; i++) {
3ee89922
BS
2718 if (io_mem_read[memory][i]) {
2719 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2720 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2721 }
2722 if (io_mem_write[memory][i]) {
2723 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2724 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2725 }
4254fab8 2726 }
db7b5426
BS
2727 }
2728
2729 return 0;
2730}
2731
00f82b8a
AJ
2732static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2733 ram_addr_t orig_memory)
db7b5426
BS
2734{
2735 subpage_t *mmio;
2736 int subpage_memory;
2737
2738 mmio = qemu_mallocz(sizeof(subpage_t));
2739 if (mmio != NULL) {
2740 mmio->base = base;
2741 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2742#if defined(DEBUG_SUBPAGE)
2743 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2744 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2745#endif
2746 *phys = subpage_memory | IO_MEM_SUBPAGE;
2747 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2748 }
2749
2750 return mmio;
2751}
2752
33417e70
FB
2753static void io_mem_init(void)
2754{
3a7d929e 2755 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2756 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2757 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2758 io_mem_nb = 5;
2759
0f459d16 2760 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2761 watch_mem_write, NULL);
1ccde1cb 2762 /* alloc dirty bits array */
0a962c02 2763 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2764 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2765}
2766
2767/* mem_read and mem_write are arrays of functions containing the
2768 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2769 2). Functions can be omitted with a NULL function pointer. The
2770 registered functions may be modified dynamically later.
 2771 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2772 modified. If it is zero, a new io zone is allocated. The return
2773 value can be used with cpu_register_physical_memory(). (-1) is
 2774 returned on error. */
33417e70
FB
2775int cpu_register_io_memory(int io_index,
2776 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2777 CPUWriteMemoryFunc **mem_write,
2778 void *opaque)
33417e70 2779{
4254fab8 2780 int i, subwidth = 0;
33417e70
FB
2781
2782 if (io_index <= 0) {
b5ff1b31 2783 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2784 return -1;
2785 io_index = io_mem_nb++;
2786 } else {
2787 if (io_index >= IO_MEM_NB_ENTRIES)
2788 return -1;
2789 }
b5ff1b31 2790
33417e70 2791 for(i = 0;i < 3; i++) {
4254fab8
BS
2792 if (!mem_read[i] || !mem_write[i])
2793 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2794 io_mem_read[io_index][i] = mem_read[i];
2795 io_mem_write[io_index][i] = mem_write[i];
2796 }
a4193c8a 2797 io_mem_opaque[io_index] = opaque;
4254fab8 2798 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2799}
61382a50 2800
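/* Illustrative usage sketch (the device and its callbacks are hypothetical):
   register one handler per access width and map the returned io index; if a
   width were left NULL the region would be tagged IO_MEM_SUBWIDTH and routed
   through the subpage machinery above. */
#if 0
static uint32_t my_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0; /* ... decode addr, return register contents ... */
}

static void my_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* ... decode addr, update device state ... */
}

static CPUReadMemoryFunc *my_dev_readfn[3] = {
    my_dev_read, my_dev_read, my_dev_read,      /* byte, word, long */
};
static CPUWriteMemoryFunc *my_dev_writefn[3] = {
    my_dev_write, my_dev_write, my_dev_write,
};

static void example_map_device(target_phys_addr_t base, void *opaque)
{
    int io = cpu_register_io_memory(0, my_dev_readfn, my_dev_writefn, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif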
8926b517
FB
2801CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2802{
2803 return io_mem_write[io_index >> IO_MEM_SHIFT];
2804}
2805
2806CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2807{
2808 return io_mem_read[io_index >> IO_MEM_SHIFT];
2809}
2810
e2eef170
PB
2811#endif /* !defined(CONFIG_USER_ONLY) */
2812
13eb76e0
FB
2813/* physical memory access (slow version, mainly for debug) */
2814#if defined(CONFIG_USER_ONLY)
5fafdf24 2815void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2816 int len, int is_write)
2817{
2818 int l, flags;
2819 target_ulong page;
53a5960a 2820 void * p;
13eb76e0
FB
2821
2822 while (len > 0) {
2823 page = addr & TARGET_PAGE_MASK;
2824 l = (page + TARGET_PAGE_SIZE) - addr;
2825 if (l > len)
2826 l = len;
2827 flags = page_get_flags(page);
2828 if (!(flags & PAGE_VALID))
2829 return;
2830 if (is_write) {
2831 if (!(flags & PAGE_WRITE))
2832 return;
579a97f7 2833 /* XXX: this code should not depend on lock_user */
72fb7daa 2834 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2835 /* FIXME - should this return an error rather than just fail? */
2836 return;
72fb7daa
AJ
2837 memcpy(p, buf, l);
2838 unlock_user(p, addr, l);
13eb76e0
FB
2839 } else {
2840 if (!(flags & PAGE_READ))
2841 return;
579a97f7 2842 /* XXX: this code should not depend on lock_user */
72fb7daa 2843 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2844 /* FIXME - should this return an error rather than just fail? */
2845 return;
72fb7daa 2846 memcpy(buf, p, l);
5b257578 2847 unlock_user(p, addr, 0);
13eb76e0
FB
2848 }
2849 len -= l;
2850 buf += l;
2851 addr += l;
2852 }
2853}
8df1cd07 2854
13eb76e0 2855#else
5fafdf24 2856void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2857 int len, int is_write)
2858{
2859 int l, io_index;
2860 uint8_t *ptr;
2861 uint32_t val;
2e12669a
FB
2862 target_phys_addr_t page;
2863 unsigned long pd;
92e873b9 2864 PhysPageDesc *p;
3b46e624 2865
13eb76e0
FB
2866 while (len > 0) {
2867 page = addr & TARGET_PAGE_MASK;
2868 l = (page + TARGET_PAGE_SIZE) - addr;
2869 if (l > len)
2870 l = len;
92e873b9 2871 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2872 if (!p) {
2873 pd = IO_MEM_UNASSIGNED;
2874 } else {
2875 pd = p->phys_offset;
2876 }
3b46e624 2877
13eb76e0 2878 if (is_write) {
3a7d929e 2879 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2880 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2881 /* XXX: could force cpu_single_env to NULL to avoid
2882 potential bugs */
13eb76e0 2883 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2884 /* 32 bit write access */
c27004ec 2885 val = ldl_p(buf);
a4193c8a 2886 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2887 l = 4;
2888 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2889 /* 16 bit write access */
c27004ec 2890 val = lduw_p(buf);
a4193c8a 2891 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2892 l = 2;
2893 } else {
1c213d19 2894 /* 8 bit write access */
c27004ec 2895 val = ldub_p(buf);
a4193c8a 2896 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2897 l = 1;
2898 }
2899 } else {
b448f2f3
FB
2900 unsigned long addr1;
2901 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2902 /* RAM case */
b448f2f3 2903 ptr = phys_ram_base + addr1;
13eb76e0 2904 memcpy(ptr, buf, l);
3a7d929e
FB
2905 if (!cpu_physical_memory_is_dirty(addr1)) {
2906 /* invalidate code */
2907 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2908 /* set dirty bit */
5fafdf24 2909 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2910 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2911 }
13eb76e0
FB
2912 }
2913 } else {
5fafdf24 2914 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2915 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2916 /* I/O case */
2917 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2918 if (l >= 4 && ((addr & 3) == 0)) {
2919 /* 32 bit read access */
a4193c8a 2920 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2921 stl_p(buf, val);
13eb76e0
FB
2922 l = 4;
2923 } else if (l >= 2 && ((addr & 1) == 0)) {
2924 /* 16 bit read access */
a4193c8a 2925 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2926 stw_p(buf, val);
13eb76e0
FB
2927 l = 2;
2928 } else {
1c213d19 2929 /* 8 bit read access */
a4193c8a 2930 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2931 stb_p(buf, val);
13eb76e0
FB
2932 l = 1;
2933 }
2934 } else {
2935 /* RAM case */
5fafdf24 2936 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2937 (addr & ~TARGET_PAGE_MASK);
2938 memcpy(buf, ptr, l);
2939 }
2940 }
2941 len -= l;
2942 buf += l;
2943 addr += l;
2944 }
2945}
8df1cd07 2946
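/* Illustrative sketch (hypothetical device DMA): device models use the
   cpu_physical_memory_read()/write() wrappers from cpu-all.h around the
   routine above; going through it keeps the dirty bitmap and the translated
   code coherent when a device writes into guest RAM. */
#if 0
static void example_dma_to_guest(target_phys_addr_t guest_buf,
                                 const uint8_t *data, int len)
{
    cpu_physical_memory_write(guest_buf, data, len);
}
#endif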
d0ecd2aa 2947/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2948void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2949 const uint8_t *buf, int len)
2950{
2951 int l;
2952 uint8_t *ptr;
2953 target_phys_addr_t page;
2954 unsigned long pd;
2955 PhysPageDesc *p;
3b46e624 2956
d0ecd2aa
FB
2957 while (len > 0) {
2958 page = addr & TARGET_PAGE_MASK;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2960 if (l > len)
2961 l = len;
2962 p = phys_page_find(page >> TARGET_PAGE_BITS);
2963 if (!p) {
2964 pd = IO_MEM_UNASSIGNED;
2965 } else {
2966 pd = p->phys_offset;
2967 }
3b46e624 2968
d0ecd2aa 2969 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2970 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2971 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2972 /* do nothing */
2973 } else {
2974 unsigned long addr1;
2975 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2976 /* ROM/RAM case */
2977 ptr = phys_ram_base + addr1;
2978 memcpy(ptr, buf, l);
2979 }
2980 len -= l;
2981 buf += l;
2982 addr += l;
2983 }
2984}
2985
2986
8df1cd07
FB
2987/* warning: addr must be aligned */
2988uint32_t ldl_phys(target_phys_addr_t addr)
2989{
2990 int io_index;
2991 uint8_t *ptr;
2992 uint32_t val;
2993 unsigned long pd;
2994 PhysPageDesc *p;
2995
2996 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2997 if (!p) {
2998 pd = IO_MEM_UNASSIGNED;
2999 } else {
3000 pd = p->phys_offset;
3001 }
3b46e624 3002
5fafdf24 3003 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3004 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3005 /* I/O case */
3006 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3007 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3008 } else {
3009 /* RAM case */
5fafdf24 3010 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3011 (addr & ~TARGET_PAGE_MASK);
3012 val = ldl_p(ptr);
3013 }
3014 return val;
3015}
3016
84b7b8e7
FB
3017/* warning: addr must be aligned */
3018uint64_t ldq_phys(target_phys_addr_t addr)
3019{
3020 int io_index;
3021 uint8_t *ptr;
3022 uint64_t val;
3023 unsigned long pd;
3024 PhysPageDesc *p;
3025
3026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3027 if (!p) {
3028 pd = IO_MEM_UNASSIGNED;
3029 } else {
3030 pd = p->phys_offset;
3031 }
3b46e624 3032
2a4188a3
FB
3033 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3034 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3035 /* I/O case */
3036 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3037#ifdef TARGET_WORDS_BIGENDIAN
3038 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3039 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3040#else
3041 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3042 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3043#endif
3044 } else {
3045 /* RAM case */
5fafdf24 3046 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3047 (addr & ~TARGET_PAGE_MASK);
3048 val = ldq_p(ptr);
3049 }
3050 return val;
3051}
3052
aab33094
FB
3053/* XXX: optimize */
3054uint32_t ldub_phys(target_phys_addr_t addr)
3055{
3056 uint8_t val;
3057 cpu_physical_memory_read(addr, &val, 1);
3058 return val;
3059}
3060
3061/* XXX: optimize */
3062uint32_t lduw_phys(target_phys_addr_t addr)
3063{
3064 uint16_t val;
3065 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3066 return tswap16(val);
3067}
3068
8df1cd07
FB
3069/* warning: addr must be aligned. The ram page is not masked as dirty
3070 and the code inside is not invalidated. It is useful if the dirty
3071 bits are used to track modified PTEs */
3072void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3073{
3074 int io_index;
3075 uint8_t *ptr;
3076 unsigned long pd;
3077 PhysPageDesc *p;
3078
3079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3080 if (!p) {
3081 pd = IO_MEM_UNASSIGNED;
3082 } else {
3083 pd = p->phys_offset;
3084 }
3b46e624 3085
3a7d929e 3086 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3087 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3088 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3089 } else {
74576198
AL
3090 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3091 ptr = phys_ram_base + addr1;
8df1cd07 3092 stl_p(ptr, val);
74576198
AL
3093
3094 if (unlikely(in_migration)) {
3095 if (!cpu_physical_memory_is_dirty(addr1)) {
3096 /* invalidate code */
3097 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3098 /* set dirty bit */
3099 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3100 (0xff & ~CODE_DIRTY_FLAG);
3101 }
3102 }
8df1cd07
FB
3103 }
3104}
3105
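/* Illustrative sketch (the PTE bit layout is a made-up example): a softmmu
   page-table walker that sets accessed/dirty bits in a guest PTE uses the
   _notdirty store above, so the emulator's own bookkeeping writes do not
   show up in a dirty bitmap that may itself be tracking PTE changes. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif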
bc98a7ef
JM
3106void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3107{
3108 int io_index;
3109 uint8_t *ptr;
3110 unsigned long pd;
3111 PhysPageDesc *p;
3112
3113 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3114 if (!p) {
3115 pd = IO_MEM_UNASSIGNED;
3116 } else {
3117 pd = p->phys_offset;
3118 }
3b46e624 3119
bc98a7ef
JM
3120 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3121 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3122#ifdef TARGET_WORDS_BIGENDIAN
3123 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3124 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3125#else
3126 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3127 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3128#endif
3129 } else {
5fafdf24 3130 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3131 (addr & ~TARGET_PAGE_MASK);
3132 stq_p(ptr, val);
3133 }
3134}
3135
8df1cd07 3136/* warning: addr must be aligned */
8df1cd07
FB
3137void stl_phys(target_phys_addr_t addr, uint32_t val)
3138{
3139 int io_index;
3140 uint8_t *ptr;
3141 unsigned long pd;
3142 PhysPageDesc *p;
3143
3144 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3145 if (!p) {
3146 pd = IO_MEM_UNASSIGNED;
3147 } else {
3148 pd = p->phys_offset;
3149 }
3b46e624 3150
3a7d929e 3151 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3152 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3153 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3154 } else {
3155 unsigned long addr1;
3156 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3157 /* RAM case */
3158 ptr = phys_ram_base + addr1;
3159 stl_p(ptr, val);
3a7d929e
FB
3160 if (!cpu_physical_memory_is_dirty(addr1)) {
3161 /* invalidate code */
3162 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3163 /* set dirty bit */
f23db169
FB
3164 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3165 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3166 }
8df1cd07
FB
3167 }
3168}
3169
aab33094
FB
3170/* XXX: optimize */
3171void stb_phys(target_phys_addr_t addr, uint32_t val)
3172{
3173 uint8_t v = val;
3174 cpu_physical_memory_write(addr, &v, 1);
3175}
3176
3177/* XXX: optimize */
3178void stw_phys(target_phys_addr_t addr, uint32_t val)
3179{
3180 uint16_t v = tswap16(val);
3181 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3182}
3183
3184/* XXX: optimize */
3185void stq_phys(target_phys_addr_t addr, uint64_t val)
3186{
3187 val = tswap64(val);
3188 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3189}
3190
13eb76e0
FB
3191#endif
3192
3193/* virtual memory access for debug */
5fafdf24 3194int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3195 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3196{
3197 int l;
9b3c35e0
JM
3198 target_phys_addr_t phys_addr;
3199 target_ulong page;
13eb76e0
FB
3200
3201 while (len > 0) {
3202 page = addr & TARGET_PAGE_MASK;
3203 phys_addr = cpu_get_phys_page_debug(env, page);
3204 /* if no physical page mapped, return an error */
3205 if (phys_addr == -1)
3206 return -1;
3207 l = (page + TARGET_PAGE_SIZE) - addr;
3208 if (l > len)
3209 l = len;
5fafdf24 3210 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3211 buf, l, is_write);
13eb76e0
FB
3212 len -= l;
3213 buf += l;
3214 addr += l;
3215 }
3216 return 0;
3217}
3218
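/* Illustrative sketch (hypothetical gdbstub helper): the gdb stub reads and
   writes guest virtual memory through the debug path above, which translates
   each page with cpu_get_phys_page_debug() and therefore also works for
   pages that are not currently in the TLB. */
#if 0
static int example_gdb_read_memory(CPUState *env, target_ulong addr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, addr, buf, len, 0 /* read */);
}
#endif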
2e70f6ef
PB
3219/* in deterministic execution mode, instructions doing device I/Os
3220 must be at the end of the TB */
3221void cpu_io_recompile(CPUState *env, void *retaddr)
3222{
3223 TranslationBlock *tb;
3224 uint32_t n, cflags;
3225 target_ulong pc, cs_base;
3226 uint64_t flags;
3227
3228 tb = tb_find_pc((unsigned long)retaddr);
3229 if (!tb) {
3230 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3231 retaddr);
3232 }
3233 n = env->icount_decr.u16.low + tb->icount;
3234 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3235 /* Calculate how many instructions had been executed before the fault
bf20dc07 3236 occurred. */
2e70f6ef
PB
3237 n = n - env->icount_decr.u16.low;
3238 /* Generate a new TB ending on the I/O insn. */
3239 n++;
3240 /* On MIPS and SH, delay slot instructions can only be restarted if
3241 they were already the first instruction in the TB. If this is not
bf20dc07 3242 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3243 branch. */
3244#if defined(TARGET_MIPS)
3245 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3246 env->active_tc.PC -= 4;
3247 env->icount_decr.u16.low++;
3248 env->hflags &= ~MIPS_HFLAG_BMASK;
3249 }
3250#elif defined(TARGET_SH4)
3251 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3252 && n > 1) {
3253 env->pc -= 2;
3254 env->icount_decr.u16.low++;
3255 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3256 }
3257#endif
3258 /* This should never happen. */
3259 if (n > CF_COUNT_MASK)
3260 cpu_abort(env, "TB too big during recompile");
3261
3262 cflags = n | CF_LAST_IO;
3263 pc = tb->pc;
3264 cs_base = tb->cs_base;
3265 flags = tb->flags;
3266 tb_phys_invalidate(tb, -1);
3267 /* FIXME: In theory this could raise an exception. In practice
3268 we have already translated the block once so it's probably ok. */
3269 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3270 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3271 the first in the TB) then we end up generating a whole new TB and
3272 repeating the fault, which is horribly inefficient.
3273 Better would be to execute just this insn uncached, or generate a
3274 second new TB. */
3275 cpu_resume_from_signal(env, NULL);
3276}
3277
e3db7226
FB
3278void dump_exec_info(FILE *f,
3279 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3280{
3281 int i, target_code_size, max_target_code_size;
3282 int direct_jmp_count, direct_jmp2_count, cross_page;
3283 TranslationBlock *tb;
3b46e624 3284
e3db7226
FB
3285 target_code_size = 0;
3286 max_target_code_size = 0;
3287 cross_page = 0;
3288 direct_jmp_count = 0;
3289 direct_jmp2_count = 0;
3290 for(i = 0; i < nb_tbs; i++) {
3291 tb = &tbs[i];
3292 target_code_size += tb->size;
3293 if (tb->size > max_target_code_size)
3294 max_target_code_size = tb->size;
3295 if (tb->page_addr[1] != -1)
3296 cross_page++;
3297 if (tb->tb_next_offset[0] != 0xffff) {
3298 direct_jmp_count++;
3299 if (tb->tb_next_offset[1] != 0xffff) {
3300 direct_jmp2_count++;
3301 }
3302 }
3303 }
3304 /* XXX: avoid using doubles ? */
57fec1fe 3305 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3306 cpu_fprintf(f, "gen code size %ld/%ld\n",
3307 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3308 cpu_fprintf(f, "TB count %d/%d\n",
3309 nb_tbs, code_gen_max_blocks);
5fafdf24 3310 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3311 nb_tbs ? target_code_size / nb_tbs : 0,
3312 max_target_code_size);
5fafdf24 3313 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3314 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3315 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3316 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3317 cross_page,
e3db7226
FB
3318 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3319 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3320 direct_jmp_count,
e3db7226
FB
3321 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3322 direct_jmp2_count,
3323 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3324 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3325 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3326 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3327 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3328 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3329}
3330
5fafdf24 3331#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3332
3333#define MMUSUFFIX _cmmu
3334#define GETPC() NULL
3335#define env cpu_single_env
b769d8fe 3336#define SOFTMMU_CODE_ACCESS
61382a50
FB
3337
3338#define SHIFT 0
3339#include "softmmu_template.h"
3340
3341#define SHIFT 1
3342#include "softmmu_template.h"
3343
3344#define SHIFT 2
3345#include "softmmu_template.h"
3346
3347#define SHIFT 3
3348#include "softmmu_template.h"
3349
3350#undef env
3351
3352#endif