54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
43#endif
54936004 44
fd6ce8f6 45//#define DEBUG_TB_INVALIDATE
66e85a21 46//#define DEBUG_FLUSH
9fa3e853 47//#define DEBUG_TLB
67d3b957 48//#define DEBUG_UNASSIGNED
49
50/* make various TB consistency checks */
51//#define DEBUG_TB_CHECK
52//#define DEBUG_TLB_CHECK
fd6ce8f6 53
1196be37 54//#define DEBUG_IOPORT
db7b5426 55//#define DEBUG_SUBPAGE
1196be37 56
57#if !defined(CONFIG_USER_ONLY)
58/* TB consistency checks only implemented for usermode emulation. */
59#undef DEBUG_TB_CHECK
60#endif
61
62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_I386) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 36
80#else
81/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82#define TARGET_PHYS_ADDR_SPACE_BITS 32
83#endif
84
fab94c0e 85TranslationBlock *tbs;
26a5f13b 86int code_gen_max_blocks;
9fa3e853 87TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 88int nb_tbs;
89/* any access to the tbs or the page table must use this lock */
90spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 91
7cb69cae 92uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
93uint8_t *code_gen_buffer;
94unsigned long code_gen_buffer_size;
95/* threshold to flush the translated code buffer */
96unsigned long code_gen_buffer_max_size;
97uint8_t *code_gen_ptr;
98
e2eef170 99#if !defined(CONFIG_USER_ONLY)
00f82b8a 100ram_addr_t phys_ram_size;
101int phys_ram_fd;
102uint8_t *phys_ram_base;
1ccde1cb 103uint8_t *phys_ram_dirty;
e9a1ab19 104static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 105#endif
9fa3e853 106
107CPUState *first_cpu;
108/* current CPU in the current thread. It is only valid inside
109 cpu_exec() */
5fafdf24 110CPUState *cpu_single_env;
2e70f6ef 111/* 0 = Do not count executed instructions.
bf20dc07 112 1 = Precise instruction counting.
113 2 = Adaptive rate instruction counting. */
114int use_icount = 0;
115/* Current instruction counter. While executing translated code this may
116 include some instructions that have not yet been executed. */
117int64_t qemu_icount;
6a00d601 118
54936004 119typedef struct PageDesc {
92e873b9 120 /* list of TBs intersecting this ram page */
fd6ce8f6 121 TranslationBlock *first_tb;
122 /* in order to optimize self modifying code, we count the number
123 of lookups we do to a given page to use a bitmap */
124 unsigned int code_write_count;
125 uint8_t *code_bitmap;
126#if defined(CONFIG_USER_ONLY)
127 unsigned long flags;
128#endif
129} PageDesc;
130
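/* NOTE (descriptive, added for readability): first_tb above, and the
   page_next[]/jmp_first links of TranslationBlock used later in this file,
   are in effect tagged pointers: the two low-order bits record which of a
   TB's (at most two) guest pages the link belongs to (0 or 1), and the
   value 2 marks the end of the circular jump list.  This is why the list
   walkers below mask with ~3 and test (long)tb & 3. */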
92e873b9 131typedef struct PhysPageDesc {
0f459d16 132 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 133 ram_addr_t phys_offset;
134} PhysPageDesc;
135
54936004 136#define L2_BITS 10
137#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138/* XXX: this is a temporary hack for alpha target.
139 * In the future, this is to be replaced by a multi-level table
140 * to actually be able to handle the complete 64-bit address space.
141 */
142#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
143#else
03875444 144#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 145#endif
146
147#define L1_SIZE (1 << L1_BITS)
148#define L2_SIZE (1 << L2_BITS)
149
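/* Illustration (added, assumes the common 4 KB target page, i.e.
   TARGET_PAGE_BITS == 12): with L2_BITS == 10 a 32-bit address gives
   L1_BITS == 10, so a page index is split as l1_map[index >> L2_BITS] for
   the first level and (index & (L2_SIZE - 1)) inside the second-level
   array, exactly as page_find_alloc() does below. */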
150unsigned long qemu_real_host_page_size;
151unsigned long qemu_host_page_bits;
152unsigned long qemu_host_page_size;
153unsigned long qemu_host_page_mask;
54936004 154
92e873b9 155/* XXX: for system emulation, it could just be an array */
54936004 156static PageDesc *l1_map[L1_SIZE];
0a962c02 157PhysPageDesc **l1_phys_map;
54936004 158
159#if !defined(CONFIG_USER_ONLY)
160static void io_mem_init(void);
161
33417e70 162/* io memory support */
163CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
164CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 165void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 166static int io_mem_nb;
167static int io_mem_watch;
168#endif
33417e70 169
170/* log support */
171char *logfilename = "/tmp/qemu.log";
172FILE *logfile;
173int loglevel;
e735b91c 174static int log_append = 0;
34865134 175
176/* statistics */
177static int tlb_flush_count;
178static int tb_flush_count;
179static int tb_phys_invalidate_count;
180
181#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182typedef struct subpage_t {
183 target_phys_addr_t base;
184 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
185 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
186 void *opaque[TARGET_PAGE_SIZE][2][4];
187} subpage_t;
188
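/* NOTE (descriptive, added for readability): a subpage splits one target
   page between several I/O handlers.  SUBPAGE_IDX() gives the byte offset
   inside the page, and the mem_read/mem_write tables are indexed by that
   offset and by access size, so different devices can back different
   ranges of the same physical page. */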
189#ifdef _WIN32
190static void map_exec(void *addr, long size)
191{
192 DWORD old_protect;
193 VirtualProtect(addr, size,
194 PAGE_EXECUTE_READWRITE, &old_protect);
195
196}
197#else
198static void map_exec(void *addr, long size)
199{
4369415f 200 unsigned long start, end, page_size;
7cb69cae 201
4369415f 202 page_size = getpagesize();
7cb69cae 203 start = (unsigned long)addr;
4369415f 204 start &= ~(page_size - 1);
205
206 end = (unsigned long)addr + size;
207 end += page_size - 1;
208 end &= ~(page_size - 1);
209
210 mprotect((void *)start, end - start,
211 PROT_READ | PROT_WRITE | PROT_EXEC);
212}
213#endif
214
b346ff46 215static void page_init(void)
54936004 216{
83fb7adf 217 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 218 TARGET_PAGE_SIZE */
67b915a5 219#ifdef _WIN32
220 {
221 SYSTEM_INFO system_info;
222 DWORD old_protect;
3b46e624 223
224 GetSystemInfo(&system_info);
225 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 226 }
67b915a5 227#else
83fb7adf 228 qemu_real_host_page_size = getpagesize();
67b915a5 229#endif
230 if (qemu_host_page_size == 0)
231 qemu_host_page_size = qemu_real_host_page_size;
232 if (qemu_host_page_size < TARGET_PAGE_SIZE)
233 qemu_host_page_size = TARGET_PAGE_SIZE;
234 qemu_host_page_bits = 0;
235 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
236 qemu_host_page_bits++;
237 qemu_host_page_mask = ~(qemu_host_page_size - 1);
238 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
239 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
240
241#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
242 {
243 long long startaddr, endaddr;
244 FILE *f;
245 int n;
246
c8a706fe 247 mmap_lock();
0776590d 248 last_brk = (unsigned long)sbrk(0);
249 f = fopen("/proc/self/maps", "r");
250 if (f) {
251 do {
252 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
253 if (n == 2) {
254 startaddr = MIN(startaddr,
255 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
256 endaddr = MIN(endaddr,
257 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 258 page_set_flags(startaddr & TARGET_PAGE_MASK,
259 TARGET_PAGE_ALIGN(endaddr),
260 PAGE_RESERVED);
261 }
262 } while (!feof(f));
263 fclose(f);
264 }
c8a706fe 265 mmap_unlock();
266 }
267#endif
268}
269
00f82b8a 270static inline PageDesc *page_find_alloc(target_ulong index)
54936004 271{
272 PageDesc **lp, *p;
273
274#if TARGET_LONG_BITS > 32
275 /* Host memory outside guest VM. For 32-bit targets we have already
276 excluded high addresses. */
277 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
278 return NULL;
279#endif
280 lp = &l1_map[index >> L2_BITS];
281 p = *lp;
282 if (!p) {
283 /* allocate if not found */
284#if defined(CONFIG_USER_ONLY)
285 unsigned long addr;
286 size_t len = sizeof(PageDesc) * L2_SIZE;
287 /* Don't use qemu_malloc because it may recurse. */
288 p = mmap(0, len, PROT_READ | PROT_WRITE,
289 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 290 *lp = p;
291 addr = h2g(p);
292 if (addr == (target_ulong)addr) {
293 page_set_flags(addr & TARGET_PAGE_MASK,
294 TARGET_PAGE_ALIGN(addr + len),
295 PAGE_RESERVED);
296 }
297#else
298 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
299 *lp = p;
300#endif
301 }
302 return p + (index & (L2_SIZE - 1));
303}
304
00f82b8a 305static inline PageDesc *page_find(target_ulong index)
54936004 306{
307 PageDesc *p;
308
309 p = l1_map[index >> L2_BITS];
310 if (!p)
311 return 0;
312 return p + (index & (L2_SIZE - 1));
313}
314
108c49b8 315static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 316{
108c49b8 317 void **lp, **p;
e3f4e2a4 318 PhysPageDesc *pd;
92e873b9 319
320 p = (void **)l1_phys_map;
321#if TARGET_PHYS_ADDR_SPACE_BITS > 32
322
323#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
324#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
325#endif
326 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
327 p = *lp;
328 if (!p) {
329 /* allocate if not found */
330 if (!alloc)
331 return NULL;
332 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
333 memset(p, 0, sizeof(void *) * L1_SIZE);
334 *lp = p;
335 }
336#endif
337 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
338 pd = *lp;
339 if (!pd) {
340 int i;
341 /* allocate if not found */
342 if (!alloc)
343 return NULL;
344 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
345 *lp = pd;
346 for (i = 0; i < L2_SIZE; i++)
347 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 348 }
e3f4e2a4 349 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
350}
351
108c49b8 352static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 353{
108c49b8 354 return phys_page_find_alloc(index, 0);
355}
356
9fa3e853 357#if !defined(CONFIG_USER_ONLY)
6a00d601 358static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 359static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 360 target_ulong vaddr);
361#define mmap_lock() do { } while(0)
362#define mmap_unlock() do { } while(0)
9fa3e853 363#endif
fd6ce8f6 364
365#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
366
367#if defined(CONFIG_USER_ONLY)
368/* Currently it is not recommended to allocate big chunks of data in
369 user mode. It will change when a dedicated libc is used */
370#define USE_STATIC_CODE_GEN_BUFFER
371#endif
372
373#ifdef USE_STATIC_CODE_GEN_BUFFER
374static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
375#endif
376
377void code_gen_alloc(unsigned long tb_size)
378{
379#ifdef USE_STATIC_CODE_GEN_BUFFER
380 code_gen_buffer = static_code_gen_buffer;
381 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
382 map_exec(code_gen_buffer, code_gen_buffer_size);
383#else
384 code_gen_buffer_size = tb_size;
385 if (code_gen_buffer_size == 0) {
386#if defined(CONFIG_USER_ONLY)
387 /* in user mode, phys_ram_size is not meaningful */
388 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
389#else
390 /* XXX: needs adjustments */
391 code_gen_buffer_size = (int)(phys_ram_size / 4);
4369415f 392#endif
393 }
394 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
395 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
396 /* The code gen buffer location may have constraints depending on
397 the host cpu and OS */
398#if defined(__linux__)
399 {
400 int flags;
401 flags = MAP_PRIVATE | MAP_ANONYMOUS;
402#if defined(__x86_64__)
403 flags |= MAP_32BIT;
404 /* Cannot map more than that */
405 if (code_gen_buffer_size > (800 * 1024 * 1024))
406 code_gen_buffer_size = (800 * 1024 * 1024);
407#endif
408 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
409 PROT_WRITE | PROT_READ | PROT_EXEC,
410 flags, -1, 0);
411 if (code_gen_buffer == MAP_FAILED) {
412 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
413 exit(1);
414 }
415 }
416#else
417 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
418 if (!code_gen_buffer) {
419 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
420 exit(1);
421 }
422 map_exec(code_gen_buffer, code_gen_buffer_size);
423#endif
4369415f 424#endif /* !USE_STATIC_CODE_GEN_BUFFER */
425 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
426 code_gen_buffer_max_size = code_gen_buffer_size -
427 code_gen_max_block_size();
428 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
429 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
430}
431
432/* Must be called before using the QEMU cpus. 'tb_size' is the size
433 (in bytes) allocated to the translation buffer. Zero means default
434 size. */
435void cpu_exec_init_all(unsigned long tb_size)
436{
437 cpu_gen_init();
438 code_gen_alloc(tb_size);
439 code_gen_ptr = code_gen_buffer;
4369415f 440 page_init();
e2eef170 441#if !defined(CONFIG_USER_ONLY)
26a5f13b 442 io_mem_init();
e2eef170 443#endif
444}
445
446#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
447
448#define CPU_COMMON_SAVE_VERSION 1
449
450static void cpu_common_save(QEMUFile *f, void *opaque)
451{
452 CPUState *env = opaque;
453
454 qemu_put_be32s(f, &env->halted);
455 qemu_put_be32s(f, &env->interrupt_request);
456}
457
458static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
459{
460 CPUState *env = opaque;
461
462 if (version_id != CPU_COMMON_SAVE_VERSION)
463 return -EINVAL;
464
465 qemu_get_be32s(f, &env->halted);
75f482ae 466 qemu_get_be32s(f, &env->interrupt_request);
467 tlb_flush(env, 1);
468
469 return 0;
470}
471#endif
472
6a00d601 473void cpu_exec_init(CPUState *env)
fd6ce8f6 474{
475 CPUState **penv;
476 int cpu_index;
477
478 env->next_cpu = NULL;
479 penv = &first_cpu;
480 cpu_index = 0;
481 while (*penv != NULL) {
482 penv = (CPUState **)&(*penv)->next_cpu;
483 cpu_index++;
484 }
485 env->cpu_index = cpu_index;
6658ffb8 486 env->nb_watchpoints = 0;
6a00d601 487 *penv = env;
b3c7724c 488#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
489 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
490 cpu_common_save, cpu_common_load, env);
491 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
492 cpu_save, cpu_load, env);
493#endif
494}
495
496static inline void invalidate_page_bitmap(PageDesc *p)
497{
498 if (p->code_bitmap) {
59817ccb 499 qemu_free(p->code_bitmap);
500 p->code_bitmap = NULL;
501 }
502 p->code_write_count = 0;
503}
504
505/* set to NULL all the 'first_tb' fields in all PageDescs */
506static void page_flush_tb(void)
507{
508 int i, j;
509 PageDesc *p;
510
511 for(i = 0; i < L1_SIZE; i++) {
512 p = l1_map[i];
513 if (p) {
514 for(j = 0; j < L2_SIZE; j++) {
515 p->first_tb = NULL;
516 invalidate_page_bitmap(p);
517 p++;
518 }
519 }
520 }
521}
522
523/* flush all the translation blocks */
d4e8164f 524/* XXX: tb_flush is currently not thread safe */
6a00d601 525void tb_flush(CPUState *env1)
fd6ce8f6 526{
6a00d601 527 CPUState *env;
0124311e 528#if defined(DEBUG_FLUSH)
529 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
530 (unsigned long)(code_gen_ptr - code_gen_buffer),
531 nb_tbs, nb_tbs > 0 ?
532 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 533#endif
26a5f13b 534 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
535 cpu_abort(env1, "Internal error: code buffer overflow\n");
536
fd6ce8f6 537 nb_tbs = 0;
3b46e624 538
539 for(env = first_cpu; env != NULL; env = env->next_cpu) {
540 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
541 }
9fa3e853 542
8a8a608f 543 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 544 page_flush_tb();
9fa3e853 545
fd6ce8f6 546 code_gen_ptr = code_gen_buffer;
547 /* XXX: flush processor icache at this point if cache flush is
548 expensive */
e3db7226 549 tb_flush_count++;
550}
551
552#ifdef DEBUG_TB_CHECK
553
bc98a7ef 554static void tb_invalidate_check(target_ulong address)
555{
556 TranslationBlock *tb;
557 int i;
558 address &= TARGET_PAGE_MASK;
559 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
560 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
561 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
562 address >= tb->pc + tb->size)) {
563 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 564 address, (long)tb->pc, tb->size);
565 }
566 }
567 }
568}
569
570/* verify that all the pages have correct rights for code */
571static void tb_page_check(void)
572{
573 TranslationBlock *tb;
574 int i, flags1, flags2;
3b46e624 575
576 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
577 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
578 flags1 = page_get_flags(tb->pc);
579 flags2 = page_get_flags(tb->pc + tb->size - 1);
580 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
581 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 582 (long)tb->pc, tb->size, flags1, flags2);
583 }
584 }
585 }
586}
587
588void tb_jmp_check(TranslationBlock *tb)
589{
590 TranslationBlock *tb1;
591 unsigned int n1;
592
593 /* suppress any remaining jumps to this TB */
594 tb1 = tb->jmp_first;
595 for(;;) {
596 n1 = (long)tb1 & 3;
597 tb1 = (TranslationBlock *)((long)tb1 & ~3);
598 if (n1 == 2)
599 break;
600 tb1 = tb1->jmp_next[n1];
601 }
602 /* check end of list */
603 if (tb1 != tb) {
604 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
605 }
606}
607
608#endif
609
610/* invalidate one TB */
611static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
612 int next_offset)
613{
614 TranslationBlock *tb1;
615 for(;;) {
616 tb1 = *ptb;
617 if (tb1 == tb) {
618 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
619 break;
620 }
621 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
622 }
623}
624
625static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
626{
627 TranslationBlock *tb1;
628 unsigned int n1;
629
630 for(;;) {
631 tb1 = *ptb;
632 n1 = (long)tb1 & 3;
633 tb1 = (TranslationBlock *)((long)tb1 & ~3);
634 if (tb1 == tb) {
635 *ptb = tb1->page_next[n1];
636 break;
637 }
638 ptb = &tb1->page_next[n1];
639 }
640}
641
642static inline void tb_jmp_remove(TranslationBlock *tb, int n)
643{
644 TranslationBlock *tb1, **ptb;
645 unsigned int n1;
646
647 ptb = &tb->jmp_next[n];
648 tb1 = *ptb;
649 if (tb1) {
650 /* find tb(n) in circular list */
651 for(;;) {
652 tb1 = *ptb;
653 n1 = (long)tb1 & 3;
654 tb1 = (TranslationBlock *)((long)tb1 & ~3);
655 if (n1 == n && tb1 == tb)
656 break;
657 if (n1 == 2) {
658 ptb = &tb1->jmp_first;
659 } else {
660 ptb = &tb1->jmp_next[n1];
661 }
662 }
663 /* now we can suppress tb(n) from the list */
664 *ptb = tb->jmp_next[n];
665
666 tb->jmp_next[n] = NULL;
667 }
668}
669
670/* reset the jump entry 'n' of a TB so that it is not chained to
671 another TB */
672static inline void tb_reset_jump(TranslationBlock *tb, int n)
673{
674 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
675}
676
2e70f6ef 677void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 678{
6a00d601 679 CPUState *env;
8a40a180 680 PageDesc *p;
d4e8164f 681 unsigned int h, n1;
00f82b8a 682 target_phys_addr_t phys_pc;
8a40a180 683 TranslationBlock *tb1, *tb2;
3b46e624 684
685 /* remove the TB from the hash list */
686 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
687 h = tb_phys_hash_func(phys_pc);
5fafdf24 688 tb_remove(&tb_phys_hash[h], tb,
689 offsetof(TranslationBlock, phys_hash_next));
690
691 /* remove the TB from the page list */
692 if (tb->page_addr[0] != page_addr) {
693 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
694 tb_page_remove(&p->first_tb, tb);
695 invalidate_page_bitmap(p);
696 }
697 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
698 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
699 tb_page_remove(&p->first_tb, tb);
700 invalidate_page_bitmap(p);
701 }
702
36bdbe54 703 tb_invalidated_flag = 1;
59817ccb 704
fd6ce8f6 705 /* remove the TB from the hash list */
8a40a180 706 h = tb_jmp_cache_hash_func(tb->pc);
707 for(env = first_cpu; env != NULL; env = env->next_cpu) {
708 if (env->tb_jmp_cache[h] == tb)
709 env->tb_jmp_cache[h] = NULL;
710 }
711
712 /* suppress this TB from the two jump lists */
713 tb_jmp_remove(tb, 0);
714 tb_jmp_remove(tb, 1);
715
716 /* suppress any remaining jumps to this TB */
717 tb1 = tb->jmp_first;
718 for(;;) {
719 n1 = (long)tb1 & 3;
720 if (n1 == 2)
721 break;
722 tb1 = (TranslationBlock *)((long)tb1 & ~3);
723 tb2 = tb1->jmp_next[n1];
724 tb_reset_jump(tb1, n1);
725 tb1->jmp_next[n1] = NULL;
726 tb1 = tb2;
727 }
728 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 729
e3db7226 730 tb_phys_invalidate_count++;
731}
732
733static inline void set_bits(uint8_t *tab, int start, int len)
734{
735 int end, mask, end1;
736
737 end = start + len;
738 tab += start >> 3;
739 mask = 0xff << (start & 7);
740 if ((start & ~7) == (end & ~7)) {
741 if (start < end) {
742 mask &= ~(0xff << (end & 7));
743 *tab |= mask;
744 }
745 } else {
746 *tab++ |= mask;
747 start = (start + 8) & ~7;
748 end1 = end & ~7;
749 while (start < end1) {
750 *tab++ = 0xff;
751 start += 8;
752 }
753 if (start < end) {
754 mask = ~(0xff << (end & 7));
755 *tab |= mask;
756 }
757 }
758}
759
760static void build_page_bitmap(PageDesc *p)
761{
762 int n, tb_start, tb_end;
763 TranslationBlock *tb;
3b46e624 764
b2a7081a 765 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
766 if (!p->code_bitmap)
767 return;
768
769 tb = p->first_tb;
770 while (tb != NULL) {
771 n = (long)tb & 3;
772 tb = (TranslationBlock *)((long)tb & ~3);
773 /* NOTE: this is subtle as a TB may span two physical pages */
774 if (n == 0) {
775 /* NOTE: tb_end may be after the end of the page, but
776 it is not a problem */
777 tb_start = tb->pc & ~TARGET_PAGE_MASK;
778 tb_end = tb_start + tb->size;
779 if (tb_end > TARGET_PAGE_SIZE)
780 tb_end = TARGET_PAGE_SIZE;
781 } else {
782 tb_start = 0;
783 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
784 }
785 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
786 tb = tb->page_next[n];
787 }
788}
789
790TranslationBlock *tb_gen_code(CPUState *env,
791 target_ulong pc, target_ulong cs_base,
792 int flags, int cflags)
793{
794 TranslationBlock *tb;
795 uint8_t *tc_ptr;
796 target_ulong phys_pc, phys_page2, virt_page2;
797 int code_gen_size;
798
799 phys_pc = get_phys_addr_code(env, pc);
800 tb = tb_alloc(pc);
801 if (!tb) {
802 /* flush must be done */
803 tb_flush(env);
804 /* cannot fail at this point */
c27004ec 805 tb = tb_alloc(pc);
806 /* Don't forget to invalidate previous TB info. */
807 tb_invalidated_flag = 1;
808 }
809 tc_ptr = code_gen_ptr;
810 tb->tc_ptr = tc_ptr;
811 tb->cs_base = cs_base;
812 tb->flags = flags;
813 tb->cflags = cflags;
d07bde88 814 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 815 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 816
d720b93d 817 /* check next page if needed */
c27004ec 818 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 819 phys_page2 = -1;
c27004ec 820 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
821 phys_page2 = get_phys_addr_code(env, virt_page2);
822 }
823 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 824 return tb;
d720b93d 825}
3b46e624 826
827/* invalidate all TBs which intersect with the target physical page
828 starting in range [start;end[. NOTE: start and end must refer to
829 the same physical page. 'is_cpu_write_access' should be true if called
830 from a real cpu write access: the virtual CPU will exit the current
831 TB if code is modified inside this TB. */
00f82b8a 832void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
833 int is_cpu_write_access)
834{
835 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 836 CPUState *env = cpu_single_env;
9fa3e853 837 PageDesc *p;
ea1c1802 838 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 839 target_ulong tb_start, tb_end;
d720b93d 840 target_ulong current_pc, current_cs_base;
841
842 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 843 if (!p)
9fa3e853 844 return;
5fafdf24 845 if (!p->code_bitmap &&
846 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
847 is_cpu_write_access) {
848 /* build code bitmap */
849 build_page_bitmap(p);
850 }
851
852 /* we remove all the TBs in the range [start, end[ */
853 /* XXX: see if in some cases it could be faster to invalidate all the code */
854 current_tb_not_found = is_cpu_write_access;
855 current_tb_modified = 0;
856 current_tb = NULL; /* avoid warning */
857 current_pc = 0; /* avoid warning */
858 current_cs_base = 0; /* avoid warning */
859 current_flags = 0; /* avoid warning */
860 tb = p->first_tb;
861 while (tb != NULL) {
862 n = (long)tb & 3;
863 tb = (TranslationBlock *)((long)tb & ~3);
864 tb_next = tb->page_next[n];
865 /* NOTE: this is subtle as a TB may span two physical pages */
866 if (n == 0) {
867 /* NOTE: tb_end may be after the end of the page, but
868 it is not a problem */
869 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
870 tb_end = tb_start + tb->size;
871 } else {
872 tb_start = tb->page_addr[1];
873 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
874 }
875 if (!(tb_end <= start || tb_start >= end)) {
876#ifdef TARGET_HAS_PRECISE_SMC
877 if (current_tb_not_found) {
878 current_tb_not_found = 0;
879 current_tb = NULL;
2e70f6ef 880 if (env->mem_io_pc) {
d720b93d 881 /* now we have a real cpu fault */
2e70f6ef 882 current_tb = tb_find_pc(env->mem_io_pc);
883 }
884 }
885 if (current_tb == tb &&
2e70f6ef 886 (current_tb->cflags & CF_COUNT_MASK) != 1) {
887 /* If we are modifying the current TB, we must stop
888 its execution. We could be more precise by checking
889 that the modification is after the current PC, but it
890 would require a specialized function to partially
891 restore the CPU state */
3b46e624 892
d720b93d 893 current_tb_modified = 1;
5fafdf24 894 cpu_restore_state(current_tb, env,
2e70f6ef 895 env->mem_io_pc, NULL);
896#if defined(TARGET_I386)
897 current_flags = env->hflags;
898 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
899 current_cs_base = (target_ulong)env->segs[R_CS].base;
900 current_pc = current_cs_base + env->eip;
901#else
902#error unsupported CPU
903#endif
904 }
905#endif /* TARGET_HAS_PRECISE_SMC */
906 /* we need to do that to handle the case where a signal
907 occurs while doing tb_phys_invalidate() */
908 saved_tb = NULL;
909 if (env) {
910 saved_tb = env->current_tb;
911 env->current_tb = NULL;
912 }
9fa3e853 913 tb_phys_invalidate(tb, -1);
914 if (env) {
915 env->current_tb = saved_tb;
916 if (env->interrupt_request && env->current_tb)
917 cpu_interrupt(env, env->interrupt_request);
918 }
919 }
920 tb = tb_next;
921 }
922#if !defined(CONFIG_USER_ONLY)
923 /* if no code remaining, no need to continue to use slow writes */
924 if (!p->first_tb) {
925 invalidate_page_bitmap(p);
d720b93d 926 if (is_cpu_write_access) {
2e70f6ef 927 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
928 }
929 }
930#endif
931#ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_modified) {
933 /* we generate a block containing just the instruction
934 modifying the memory. It will ensure that it cannot modify
935 itself */
ea1c1802 936 env->current_tb = NULL;
2e70f6ef 937 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 938 cpu_resume_from_signal(env, NULL);
9fa3e853 939 }
fd6ce8f6 940#endif
9fa3e853 941}
fd6ce8f6 942
9fa3e853 943/* len must be <= 8 and start must be a multiple of len */
00f82b8a 944static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
945{
946 PageDesc *p;
947 int offset, b;
59817ccb 948#if 0
949 if (1) {
950 if (loglevel) {
5fafdf24 951 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 952 cpu_single_env->mem_io_vaddr, len,
5fafdf24 953 cpu_single_env->eip,
954 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
955 }
956 }
957#endif
9fa3e853 958 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 959 if (!p)
960 return;
961 if (p->code_bitmap) {
962 offset = start & ~TARGET_PAGE_MASK;
963 b = p->code_bitmap[offset >> 3] >> (offset & 7);
964 if (b & ((1 << len) - 1))
965 goto do_invalidate;
966 } else {
967 do_invalidate:
d720b93d 968 tb_invalidate_phys_page_range(start, start + len, 1);
969 }
970}
971
9fa3e853 972#if !defined(CONFIG_SOFTMMU)
00f82b8a 973static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 974 unsigned long pc, void *puc)
9fa3e853 975{
976 int n, current_flags, current_tb_modified;
977 target_ulong current_pc, current_cs_base;
9fa3e853 978 PageDesc *p;
979 TranslationBlock *tb, *current_tb;
980#ifdef TARGET_HAS_PRECISE_SMC
981 CPUState *env = cpu_single_env;
982#endif
983
984 addr &= TARGET_PAGE_MASK;
985 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 986 if (!p)
987 return;
988 tb = p->first_tb;
989 current_tb_modified = 0;
990 current_tb = NULL;
991 current_pc = 0; /* avoid warning */
992 current_cs_base = 0; /* avoid warning */
993 current_flags = 0; /* avoid warning */
994#ifdef TARGET_HAS_PRECISE_SMC
995 if (tb && pc != 0) {
996 current_tb = tb_find_pc(pc);
997 }
998#endif
999 while (tb != NULL) {
1000 n = (long)tb & 3;
1001 tb = (TranslationBlock *)((long)tb & ~3);
1002#ifdef TARGET_HAS_PRECISE_SMC
1003 if (current_tb == tb &&
2e70f6ef 1004 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1005 /* If we are modifying the current TB, we must stop
1006 its execution. We could be more precise by checking
1007 that the modification is after the current PC, but it
1008 would require a specialized function to partially
1009 restore the CPU state */
3b46e624 1010
1011 current_tb_modified = 1;
1012 cpu_restore_state(current_tb, env, pc, puc);
1013#if defined(TARGET_I386)
1014 current_flags = env->hflags;
1015 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1016 current_cs_base = (target_ulong)env->segs[R_CS].base;
1017 current_pc = current_cs_base + env->eip;
1018#else
1019#error unsupported CPU
1020#endif
1021 }
1022#endif /* TARGET_HAS_PRECISE_SMC */
1023 tb_phys_invalidate(tb, addr);
1024 tb = tb->page_next[n];
1025 }
fd6ce8f6 1026 p->first_tb = NULL;
1027#ifdef TARGET_HAS_PRECISE_SMC
1028 if (current_tb_modified) {
1029 /* we generate a block containing just the instruction
1030 modifying the memory. It will ensure that it cannot modify
1031 itself */
ea1c1802 1032 env->current_tb = NULL;
2e70f6ef 1033 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1034 cpu_resume_from_signal(env, puc);
1035 }
1036#endif
fd6ce8f6 1037}
9fa3e853 1038#endif
1039
1040/* add the tb in the target page and protect it if necessary */
5fafdf24 1041static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1042 unsigned int n, target_ulong page_addr)
1043{
1044 PageDesc *p;
1045 TranslationBlock *last_first_tb;
1046
1047 tb->page_addr[n] = page_addr;
3a7d929e 1048 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1049 tb->page_next[n] = p->first_tb;
1050 last_first_tb = p->first_tb;
1051 p->first_tb = (TranslationBlock *)((long)tb | n);
1052 invalidate_page_bitmap(p);
fd6ce8f6 1053
107db443 1054#if defined(TARGET_HAS_SMC) || 1
d720b93d 1055
9fa3e853 1056#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1057 if (p->flags & PAGE_WRITE) {
1058 target_ulong addr;
1059 PageDesc *p2;
1060 int prot;
1061
1062 /* force the host page as non writable (writes will have a
1063 page fault + mprotect overhead) */
53a5960a 1064 page_addr &= qemu_host_page_mask;
fd6ce8f6 1065 prot = 0;
1066 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1067 addr += TARGET_PAGE_SIZE) {
1068
1069 p2 = page_find (addr >> TARGET_PAGE_BITS);
1070 if (!p2)
1071 continue;
1072 prot |= p2->flags;
1073 p2->flags &= ~PAGE_WRITE;
1074 page_get_flags(addr);
1075 }
5fafdf24 1076 mprotect(g2h(page_addr), qemu_host_page_size,
1077 (prot & PAGE_BITS) & ~PAGE_WRITE);
1078#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1079 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1080 page_addr);
fd6ce8f6 1081#endif
fd6ce8f6 1082 }
1083#else
1084 /* if some code is already present, then the pages are already
1085 protected. So we handle the case where only the first TB is
1086 allocated in a physical page */
1087 if (!last_first_tb) {
6a00d601 1088 tlb_protect_code(page_addr);
1089 }
1090#endif
1091
1092#endif /* TARGET_HAS_SMC */
1093}
1094
1095/* Allocate a new translation block. Flush the translation buffer if
1096 too many translation blocks or too much generated code. */
c27004ec 1097TranslationBlock *tb_alloc(target_ulong pc)
1098{
1099 TranslationBlock *tb;
fd6ce8f6 1100
1101 if (nb_tbs >= code_gen_max_blocks ||
1102 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1103 return NULL;
1104 tb = &tbs[nb_tbs++];
1105 tb->pc = pc;
b448f2f3 1106 tb->cflags = 0;
1107 return tb;
1108}
1109
1110void tb_free(TranslationBlock *tb)
1111{
bf20dc07 1112 /* In practice this is mostly used for single-use temporary TBs.
1113 Ignore the hard cases and just back up if this TB happens to
1114 be the last one generated. */
1115 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1116 code_gen_ptr = tb->tc_ptr;
1117 nb_tbs--;
1118 }
1119}
1120
1121/* add a new TB and link it to the physical page tables. phys_page2 is
1122 (-1) to indicate that only one page contains the TB. */
5fafdf24 1123void tb_link_phys(TranslationBlock *tb,
9fa3e853 1124 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1125{
1126 unsigned int h;
1127 TranslationBlock **ptb;
1128
1129 /* Grab the mmap lock to stop another thread invalidating this TB
1130 before we are done. */
1131 mmap_lock();
1132 /* add in the physical hash table */
1133 h = tb_phys_hash_func(phys_pc);
1134 ptb = &tb_phys_hash[h];
1135 tb->phys_hash_next = *ptb;
1136 *ptb = tb;
1137
1138 /* add in the page list */
1139 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1140 if (phys_page2 != -1)
1141 tb_alloc_page(tb, 1, phys_page2);
1142 else
1143 tb->page_addr[1] = -1;
9fa3e853 1144
1145 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1146 tb->jmp_next[0] = NULL;
1147 tb->jmp_next[1] = NULL;
1148
1149 /* init original jump addresses */
1150 if (tb->tb_next_offset[0] != 0xffff)
1151 tb_reset_jump(tb, 0);
1152 if (tb->tb_next_offset[1] != 0xffff)
1153 tb_reset_jump(tb, 1);
1154
1155#ifdef DEBUG_TB_CHECK
1156 tb_page_check();
1157#endif
c8a706fe 1158 mmap_unlock();
1159}
1160
1161/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1162 tb[1].tc_ptr. Return NULL if not found */
1163TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1164{
1165 int m_min, m_max, m;
1166 unsigned long v;
1167 TranslationBlock *tb;
1168
1169 if (nb_tbs <= 0)
1170 return NULL;
1171 if (tc_ptr < (unsigned long)code_gen_buffer ||
1172 tc_ptr >= (unsigned long)code_gen_ptr)
1173 return NULL;
1174 /* binary search (cf Knuth) */
1175 m_min = 0;
1176 m_max = nb_tbs - 1;
1177 while (m_min <= m_max) {
1178 m = (m_min + m_max) >> 1;
1179 tb = &tbs[m];
1180 v = (unsigned long)tb->tc_ptr;
1181 if (v == tc_ptr)
1182 return tb;
1183 else if (tc_ptr < v) {
1184 m_max = m - 1;
1185 } else {
1186 m_min = m + 1;
1187 }
5fafdf24 1188 }
1189 return &tbs[m_max];
1190}
7501267e 1191
1192static void tb_reset_jump_recursive(TranslationBlock *tb);
1193
1194static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1195{
1196 TranslationBlock *tb1, *tb_next, **ptb;
1197 unsigned int n1;
1198
1199 tb1 = tb->jmp_next[n];
1200 if (tb1 != NULL) {
1201 /* find head of list */
1202 for(;;) {
1203 n1 = (long)tb1 & 3;
1204 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1205 if (n1 == 2)
1206 break;
1207 tb1 = tb1->jmp_next[n1];
1208 }
1209 /* we are now sure now that tb jumps to tb1 */
1210 tb_next = tb1;
1211
1212 /* remove tb from the jmp_first list */
1213 ptb = &tb_next->jmp_first;
1214 for(;;) {
1215 tb1 = *ptb;
1216 n1 = (long)tb1 & 3;
1217 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1218 if (n1 == n && tb1 == tb)
1219 break;
1220 ptb = &tb1->jmp_next[n1];
1221 }
1222 *ptb = tb->jmp_next[n];
1223 tb->jmp_next[n] = NULL;
3b46e624 1224
1225 /* suppress the jump to next tb in generated code */
1226 tb_reset_jump(tb, n);
1227
0124311e 1228 /* suppress jumps in the tb on which we could have jumped */
1229 tb_reset_jump_recursive(tb_next);
1230 }
1231}
1232
1233static void tb_reset_jump_recursive(TranslationBlock *tb)
1234{
1235 tb_reset_jump_recursive2(tb, 0);
1236 tb_reset_jump_recursive2(tb, 1);
1237}
1238
1fddef4b 1239#if defined(TARGET_HAS_ICE)
1240static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1241{
1242 target_phys_addr_t addr;
1243 target_ulong pd;
1244 ram_addr_t ram_addr;
1245 PhysPageDesc *p;
d720b93d 1246
1247 addr = cpu_get_phys_page_debug(env, pc);
1248 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1249 if (!p) {
1250 pd = IO_MEM_UNASSIGNED;
1251 } else {
1252 pd = p->phys_offset;
1253 }
1254 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1255 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1256}
c27004ec 1257#endif
d720b93d 1258
6658ffb8 1259/* Add a watchpoint. */
0f459d16 1260int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1261{
1262 int i;
1263
1264 for (i = 0; i < env->nb_watchpoints; i++) {
1265 if (addr == env->watchpoint[i].vaddr)
1266 return 0;
1267 }
1268 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1269 return -1;
1270
1271 i = env->nb_watchpoints++;
1272 env->watchpoint[i].vaddr = addr;
0f459d16 1273 env->watchpoint[i].type = type;
1274 tlb_flush_page(env, addr);
1275 /* FIXME: This flush is needed because of the hack to make memory ops
1276 terminate the TB. It can be removed once the proper IO trap and
1277 re-execute bits are in. */
1278 tb_flush(env);
1279 return i;
1280}
1281
1282/* Remove a watchpoint. */
1283int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1284{
1285 int i;
1286
1287 for (i = 0; i < env->nb_watchpoints; i++) {
1288 if (addr == env->watchpoint[i].vaddr) {
1289 env->nb_watchpoints--;
1290 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1291 tlb_flush_page(env, addr);
1292 return 0;
1293 }
1294 }
1295 return -1;
1296}
1297
1298/* Remove all watchpoints. */
1299void cpu_watchpoint_remove_all(CPUState *env) {
1300 int i;
1301
1302 for (i = 0; i < env->nb_watchpoints; i++) {
1303 tlb_flush_page(env, env->watchpoint[i].vaddr);
1304 }
1305 env->nb_watchpoints = 0;
1306}
1307
1308/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1309 breakpoint is reached */
2e12669a 1310int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1311{
1fddef4b 1312#if defined(TARGET_HAS_ICE)
4c3a88a2 1313 int i;
3b46e624 1314
1315 for(i = 0; i < env->nb_breakpoints; i++) {
1316 if (env->breakpoints[i] == pc)
1317 return 0;
1318 }
1319
1320 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1321 return -1;
1322 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1323
d720b93d 1324 breakpoint_invalidate(env, pc);
1325 return 0;
1326#else
1327 return -1;
1328#endif
1329}
1330
1331/* remove all breakpoints */
1332void cpu_breakpoint_remove_all(CPUState *env) {
1333#if defined(TARGET_HAS_ICE)
1334 int i;
1335 for(i = 0; i < env->nb_breakpoints; i++) {
1336 breakpoint_invalidate(env, env->breakpoints[i]);
1337 }
1338 env->nb_breakpoints = 0;
1339#endif
1340}
1341
4c3a88a2 1342/* remove a breakpoint */
2e12669a 1343int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1344{
1fddef4b 1345#if defined(TARGET_HAS_ICE)
1346 int i;
1347 for(i = 0; i < env->nb_breakpoints; i++) {
1348 if (env->breakpoints[i] == pc)
1349 goto found;
1350 }
1351 return -1;
1352 found:
4c3a88a2 1353 env->nb_breakpoints--;
1354 if (i < env->nb_breakpoints)
1355 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1356
1357 breakpoint_invalidate(env, pc);
1358 return 0;
1359#else
1360 return -1;
1361#endif
1362}
1363
1364/* enable or disable single step mode. EXCP_DEBUG is returned by the
1365 CPU loop after each instruction */
1366void cpu_single_step(CPUState *env, int enabled)
1367{
1fddef4b 1368#if defined(TARGET_HAS_ICE)
1369 if (env->singlestep_enabled != enabled) {
1370 env->singlestep_enabled = enabled;
1371 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1372 /* XXX: only flush what is necessary */
0124311e 1373 tb_flush(env);
1374 }
1375#endif
1376}
1377
1378/* enable or disable low levels log */
1379void cpu_set_log(int log_flags)
1380{
1381 loglevel = log_flags;
1382 if (loglevel && !logfile) {
11fcfab4 1383 logfile = fopen(logfilename, log_append ? "a" : "w");
1384 if (!logfile) {
1385 perror(logfilename);
1386 _exit(1);
1387 }
1388#if !defined(CONFIG_SOFTMMU)
1389 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1390 {
1391 static uint8_t logfile_buf[4096];
1392 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1393 }
1394#else
34865134 1395 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1396#endif
1397 log_append = 1;
1398 }
1399 if (!loglevel && logfile) {
1400 fclose(logfile);
1401 logfile = NULL;
1402 }
1403}
1404
1405void cpu_set_log_filename(const char *filename)
1406{
1407 logfilename = strdup(filename);
1408 if (logfile) {
1409 fclose(logfile);
1410 logfile = NULL;
1411 }
1412 cpu_set_log(loglevel);
34865134 1413}
c33a346e 1414
0124311e 1415/* mask must never be zero, except for A20 change call */
68a79315 1416void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1417{
d5975363 1418#if !defined(USE_NPTL)
ea041c0e 1419 TranslationBlock *tb;
15a51156 1420 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1421#endif
2e70f6ef 1422 int old_mask;
59817ccb 1423
2e70f6ef 1424 old_mask = env->interrupt_request;
d5975363 1425 /* FIXME: This is probably not threadsafe. A different thread could
bf20dc07 1426 be in the middle of a read-modify-write operation. */
68a79315 1427 env->interrupt_request |= mask;
1428#if defined(USE_NPTL)
1429 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1430 problem and hope the cpu will stop of its own accord. For userspace
1431 emulation this often isn't actually as bad as it sounds. Often
1432 signals are used primarily to interrupt blocking syscalls. */
1433#else
2e70f6ef 1434 if (use_icount) {
266910c4 1435 env->icount_decr.u16.high = 0xffff;
1436#ifndef CONFIG_USER_ONLY
1437 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1438 an async event happened and we need to process it. */
1439 if (!can_do_io(env)
1440 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1441 cpu_abort(env, "Raised interrupt while not in I/O function");
1442 }
1443#endif
1444 } else {
1445 tb = env->current_tb;
1446 /* if the cpu is currently executing code, we must unlink it and
1447 all the potentially executing TB */
1448 if (tb && !testandset(&interrupt_lock)) {
1449 env->current_tb = NULL;
1450 tb_reset_jump_recursive(tb);
1451 resetlock(&interrupt_lock);
1452 }
ea041c0e 1453 }
d5975363 1454#endif
1455}
1456
1457void cpu_reset_interrupt(CPUState *env, int mask)
1458{
1459 env->interrupt_request &= ~mask;
1460}
1461
f193c797 1462CPULogItem cpu_log_items[] = {
5fafdf24 1463 { CPU_LOG_TB_OUT_ASM, "out_asm",
1464 "show generated host assembly code for each compiled TB" },
1465 { CPU_LOG_TB_IN_ASM, "in_asm",
1466 "show target assembly code for each compiled TB" },
5fafdf24 1467 { CPU_LOG_TB_OP, "op",
57fec1fe 1468 "show micro ops for each compiled TB" },
f193c797 1469 { CPU_LOG_TB_OP_OPT, "op_opt",
1470 "show micro ops "
1471#ifdef TARGET_I386
1472 "before eflags optimization and "
f193c797 1473#endif
e01a1157 1474 "after liveness analysis" },
1475 { CPU_LOG_INT, "int",
1476 "show interrupts/exceptions in short format" },
1477 { CPU_LOG_EXEC, "exec",
1478 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1479 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1480 "show CPU state before block translation" },
1481#ifdef TARGET_I386
1482 { CPU_LOG_PCALL, "pcall",
1483 "show protected mode far calls/returns/exceptions" },
1484#endif
8e3a9fd2 1485#ifdef DEBUG_IOPORT
1486 { CPU_LOG_IOPORT, "ioport",
1487 "show all i/o ports accesses" },
8e3a9fd2 1488#endif
1489 { 0, NULL, NULL },
1490};
1491
1492static int cmp1(const char *s1, int n, const char *s2)
1493{
1494 if (strlen(s2) != n)
1495 return 0;
1496 return memcmp(s1, s2, n) == 0;
1497}
3b46e624 1498
1499/* takes a comma separated list of log masks. Return 0 if error. */
1500int cpu_str_to_log_mask(const char *str)
1501{
1502 CPULogItem *item;
1503 int mask;
1504 const char *p, *p1;
1505
1506 p = str;
1507 mask = 0;
1508 for(;;) {
1509 p1 = strchr(p, ',');
1510 if (!p1)
1511 p1 = p + strlen(p);
1512 if(cmp1(p,p1-p,"all")) {
1513 for(item = cpu_log_items; item->mask != 0; item++) {
1514 mask |= item->mask;
1515 }
1516 } else {
1517 for(item = cpu_log_items; item->mask != 0; item++) {
1518 if (cmp1(p, p1 - p, item->name))
1519 goto found;
1520 }
1521 return 0;
8e3a9fd2 1522 }
1523 found:
1524 mask |= item->mask;
1525 if (*p1 != ',')
1526 break;
1527 p = p1 + 1;
1528 }
1529 return mask;
1530}
ea041c0e 1531
1532void cpu_abort(CPUState *env, const char *fmt, ...)
1533{
1534 va_list ap;
493ae1f0 1535 va_list ap2;
1536
1537 va_start(ap, fmt);
493ae1f0 1538 va_copy(ap2, ap);
1539 fprintf(stderr, "qemu: fatal: ");
1540 vfprintf(stderr, fmt, ap);
1541 fprintf(stderr, "\n");
1542#ifdef TARGET_I386
1543 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1544#else
1545 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1546#endif
924edcae 1547 if (logfile) {
f9373291 1548 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1549 vfprintf(logfile, fmt, ap2);
1550 fprintf(logfile, "\n");
1551#ifdef TARGET_I386
1552 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1553#else
1554 cpu_dump_state(env, logfile, fprintf, 0);
1555#endif
1556 fflush(logfile);
1557 fclose(logfile);
1558 }
493ae1f0 1559 va_end(ap2);
f9373291 1560 va_end(ap);
1561 abort();
1562}
1563
1564CPUState *cpu_copy(CPUState *env)
1565{
01ba9816 1566 CPUState *new_env = cpu_init(env->cpu_model_str);
1567 /* preserve chaining and index */
1568 CPUState *next_cpu = new_env->next_cpu;
1569 int cpu_index = new_env->cpu_index;
1570 memcpy(new_env, env, sizeof(CPUState));
1571 new_env->next_cpu = next_cpu;
1572 new_env->cpu_index = cpu_index;
1573 return new_env;
1574}
1575
1576#if !defined(CONFIG_USER_ONLY)
1577
1578static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1579{
1580 unsigned int i;
1581
1582 /* Discard jump cache entries for any tb which might potentially
1583 overlap the flushed page. */
1584 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1585 memset (&env->tb_jmp_cache[i], 0,
1586 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1587
1588 i = tb_jmp_cache_hash_page(addr);
1589 memset (&env->tb_jmp_cache[i], 0,
1590 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1591}
1592
1593/* NOTE: if flush_global is true, also flush global entries (not
1594 implemented yet) */
1595void tlb_flush(CPUState *env, int flush_global)
33417e70 1596{
33417e70 1597 int i;
0124311e 1598
1599#if defined(DEBUG_TLB)
1600 printf("tlb_flush:\n");
1601#endif
1602 /* must reset current TB so that interrupts cannot modify the
1603 links while we are modifying them */
1604 env->current_tb = NULL;
1605
33417e70 1606 for(i = 0; i < CPU_TLB_SIZE; i++) {
1607 env->tlb_table[0][i].addr_read = -1;
1608 env->tlb_table[0][i].addr_write = -1;
1609 env->tlb_table[0][i].addr_code = -1;
1610 env->tlb_table[1][i].addr_read = -1;
1611 env->tlb_table[1][i].addr_write = -1;
1612 env->tlb_table[1][i].addr_code = -1;
1613#if (NB_MMU_MODES >= 3)
1614 env->tlb_table[2][i].addr_read = -1;
1615 env->tlb_table[2][i].addr_write = -1;
1616 env->tlb_table[2][i].addr_code = -1;
1617#if (NB_MMU_MODES == 4)
1618 env->tlb_table[3][i].addr_read = -1;
1619 env->tlb_table[3][i].addr_write = -1;
1620 env->tlb_table[3][i].addr_code = -1;
1621#endif
1622#endif
33417e70 1623 }
9fa3e853 1624
8a40a180 1625 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1626
1627#ifdef USE_KQEMU
1628 if (env->kqemu_enabled) {
1629 kqemu_flush(env, flush_global);
1630 }
9fa3e853 1631#endif
e3db7226 1632 tlb_flush_count++;
1633}
1634
274da6b2 1635static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1636{
5fafdf24 1637 if (addr == (tlb_entry->addr_read &
84b7b8e7 1638 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1639 addr == (tlb_entry->addr_write &
84b7b8e7 1640 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1641 addr == (tlb_entry->addr_code &
1642 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1643 tlb_entry->addr_read = -1;
1644 tlb_entry->addr_write = -1;
1645 tlb_entry->addr_code = -1;
1646 }
1647}
1648
2e12669a 1649void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1650{
8a40a180 1651 int i;
0124311e 1652
9fa3e853 1653#if defined(DEBUG_TLB)
108c49b8 1654 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1655#endif
1656 /* must reset current TB so that interrupts cannot modify the
1657 links while we are modifying them */
1658 env->current_tb = NULL;
1659
1660 addr &= TARGET_PAGE_MASK;
1661 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1662 tlb_flush_entry(&env->tlb_table[0][i], addr);
1663 tlb_flush_entry(&env->tlb_table[1][i], addr);
1664#if (NB_MMU_MODES >= 3)
1665 tlb_flush_entry(&env->tlb_table[2][i], addr);
1666#if (NB_MMU_MODES == 4)
1667 tlb_flush_entry(&env->tlb_table[3][i], addr);
1668#endif
1669#endif
0124311e 1670
5c751e99 1671 tlb_flush_jmp_cache(env, addr);
9fa3e853 1672
1673#ifdef USE_KQEMU
1674 if (env->kqemu_enabled) {
1675 kqemu_flush_page(env, addr);
1676 }
1677#endif
1678}
1679
1680/* update the TLBs so that writes to code in the virtual page 'addr'
1681 can be detected */
6a00d601 1682static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1683{
5fafdf24 1684 cpu_physical_memory_reset_dirty(ram_addr,
1685 ram_addr + TARGET_PAGE_SIZE,
1686 CODE_DIRTY_FLAG);
1687}
1688
9fa3e853 1689/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1690 tested for self modifying code */
5fafdf24 1691static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1692 target_ulong vaddr)
9fa3e853 1693{
3a7d929e 1694 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1695}
1696
5fafdf24 1697static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1698 unsigned long start, unsigned long length)
1699{
1700 unsigned long addr;
1701 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1702 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1703 if ((addr - start) < length) {
0f459d16 1704 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1705 }
1706 }
1707}
1708
3a7d929e 1709void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1710 int dirty_flags)
1711{
1712 CPUState *env;
4f2ac237 1713 unsigned long length, start1;
1714 int i, mask, len;
1715 uint8_t *p;
1716
1717 start &= TARGET_PAGE_MASK;
1718 end = TARGET_PAGE_ALIGN(end);
1719
1720 length = end - start;
1721 if (length == 0)
1722 return;
0a962c02 1723 len = length >> TARGET_PAGE_BITS;
3a7d929e 1724#ifdef USE_KQEMU
1725 /* XXX: should not depend on cpu context */
1726 env = first_cpu;
3a7d929e 1727 if (env->kqemu_enabled) {
1728 ram_addr_t addr;
1729 addr = start;
1730 for(i = 0; i < len; i++) {
1731 kqemu_set_notdirty(env, addr);
1732 addr += TARGET_PAGE_SIZE;
1733 }
1734 }
1735#endif
1736 mask = ~dirty_flags;
1737 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1738 for(i = 0; i < len; i++)
1739 p[i] &= mask;
1740
1741 /* we modify the TLB cache so that the dirty bit will be set again
1742 when accessing the range */
59817ccb 1743 start1 = start + (unsigned long)phys_ram_base;
1744 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1745 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1746 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1747 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1748 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1749#if (NB_MMU_MODES >= 3)
1750 for(i = 0; i < CPU_TLB_SIZE; i++)
1751 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1752#if (NB_MMU_MODES == 4)
1753 for(i = 0; i < CPU_TLB_SIZE; i++)
1754 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1755#endif
1756#endif
6a00d601 1757 }
1ccde1cb
FB
1758}
1759
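/* Illustrative sketch, not part of the original file: a display adapter
   model typically scans the dirty bitmap for its framebuffer pages and
   then clears only the flag it owns, leaving CODE_DIRTY_FLAG untouched.
   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to be
   the usual cpu-all.h helpers of this vintage; the function name below is
   invented for the example. */
static void example_update_display(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this guest page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}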
3a7d929e
FB
1760static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1761{
1762 ram_addr_t ram_addr;
1763
84b7b8e7 1764 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1765 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1766 tlb_entry->addend - (unsigned long)phys_ram_base;
1767 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1768 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1769 }
1770 }
1771}
1772
1773/* update the TLB according to the current state of the dirty bits */
1774void cpu_tlb_update_dirty(CPUState *env)
1775{
1776 int i;
1777 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1778 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1779 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1780 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1781#if (NB_MMU_MODES >= 3)
1782 for(i = 0; i < CPU_TLB_SIZE; i++)
1783 tlb_update_dirty(&env->tlb_table[2][i]);
1784#if (NB_MMU_MODES == 4)
1785 for(i = 0; i < CPU_TLB_SIZE; i++)
1786 tlb_update_dirty(&env->tlb_table[3][i]);
1787#endif
1788#endif
3a7d929e
FB
1789}
1790
0f459d16 1791static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1792{
0f459d16
PB
1793 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1794 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1795}
1796
0f459d16
PB
1797/* update the TLB corresponding to virtual page vaddr
1798 so that it is no longer dirty */
1799static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1800{
1ccde1cb
FB
1801 int i;
1802
0f459d16 1803 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1804 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1805 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1806 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1807#if (NB_MMU_MODES >= 3)
0f459d16 1808 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1809#if (NB_MMU_MODES == 4)
0f459d16 1810 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1811#endif
1812#endif
9fa3e853
FB
1813}
1814
59817ccb
FB
1815/* add a new TLB entry. At most one entry for a given virtual address
1816 is permitted. Return 0 if OK or 2 if the page could not be mapped
1817 (can only happen in non SOFTMMU mode for I/O pages or pages
1818 conflicting with the host address space). */
5fafdf24
TS
1819int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1820 target_phys_addr_t paddr, int prot,
6ebbf390 1821 int mmu_idx, int is_softmmu)
9fa3e853 1822{
92e873b9 1823 PhysPageDesc *p;
4f2ac237 1824 unsigned long pd;
9fa3e853 1825 unsigned int index;
4f2ac237 1826 target_ulong address;
0f459d16 1827 target_ulong code_address;
108c49b8 1828 target_phys_addr_t addend;
9fa3e853 1829 int ret;
84b7b8e7 1830 CPUTLBEntry *te;
6658ffb8 1831 int i;
0f459d16 1832 target_phys_addr_t iotlb;
9fa3e853 1833
92e873b9 1834 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1835 if (!p) {
1836 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1837 } else {
1838 pd = p->phys_offset;
9fa3e853
FB
1839 }
1840#if defined(DEBUG_TLB)
6ebbf390
JM
1841 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1842 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1843#endif
1844
1845 ret = 0;
0f459d16
PB
1846 address = vaddr;
1847 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1848 /* IO memory case (romd handled later) */
1849 address |= TLB_MMIO;
1850 }
1851 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1852 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1853 /* Normal RAM. */
1854 iotlb = pd & TARGET_PAGE_MASK;
1855 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1856 iotlb |= IO_MEM_NOTDIRTY;
1857 else
1858 iotlb |= IO_MEM_ROM;
1859 } else {
1860 /* IO handlers are currently passed a physical address.
1861 It would be nice to pass an offset from the base address
1862 of that region. This would avoid having to special case RAM,
1863 and avoid full address decoding in every device.
1864 We can't use the high bits of pd for this because
1865 IO_MEM_ROMD uses these as a ram address. */
1866 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1867 }
1868
1869 code_address = address;
1870 /* Make accesses to pages with watchpoints go via the
1871 watchpoint trap routines. */
1872 for (i = 0; i < env->nb_watchpoints; i++) {
1873 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1874 iotlb = io_mem_watch + paddr;
1875 /* TODO: The memory case can be optimized by not trapping
1876 reads of pages with a write breakpoint. */
1877 address |= TLB_MMIO;
6658ffb8 1878 }
0f459d16 1879 }
d79acba4 1880
0f459d16
PB
1881 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1882 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1883 te = &env->tlb_table[mmu_idx][index];
1884 te->addend = addend - vaddr;
1885 if (prot & PAGE_READ) {
1886 te->addr_read = address;
1887 } else {
1888 te->addr_read = -1;
1889 }
5c751e99 1890
0f459d16
PB
1891 if (prot & PAGE_EXEC) {
1892 te->addr_code = code_address;
1893 } else {
1894 te->addr_code = -1;
1895 }
1896 if (prot & PAGE_WRITE) {
1897 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1898 (pd & IO_MEM_ROMD)) {
1899 /* Write access calls the I/O callback. */
1900 te->addr_write = address | TLB_MMIO;
1901 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1902 !cpu_physical_memory_is_dirty(pd)) {
1903 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1904 } else {
0f459d16 1905 te->addr_write = address;
9fa3e853 1906 }
0f459d16
PB
1907 } else {
1908 te->addr_write = -1;
9fa3e853 1909 }
9fa3e853
FB
1910 return ret;
1911}
1912
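/* Illustrative sketch, not part of the original file: a target's MMU fault
   handler, after walking its page tables, installs the translation with
   tlb_set_page_exec().  example_mmu_translate() is an invented helper that
   stands for the target-specific walk; returning 1 here is the sketch's
   own convention for "raise a guest fault". */
static int example_mmu_translate(CPUState *env, target_ulong vaddr,
                                 int is_write, int mmu_idx,
                                 target_phys_addr_t *paddr, int *prot);

static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int is_write, int mmu_idx,
                                    int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    if (example_mmu_translate(env, vaddr, is_write, mmu_idx,
                              &paddr, &prot) < 0) {
        return 1;   /* no mapping: caller raises the guest exception */
    }
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, is_softmmu);
}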
0124311e
FB
1913#else
1914
ee8b7021 1915void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1916{
1917}
1918
2e12669a 1919void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1920{
1921}
1922
5fafdf24
TS
1923int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1924 target_phys_addr_t paddr, int prot,
6ebbf390 1925 int mmu_idx, int is_softmmu)
9fa3e853
FB
1926{
1927 return 0;
1928}
0124311e 1929
9fa3e853
FB
1930/* dump memory mappings */
1931void page_dump(FILE *f)
33417e70 1932{
9fa3e853
FB
1933 unsigned long start, end;
1934 int i, j, prot, prot1;
1935 PageDesc *p;
33417e70 1936
9fa3e853
FB
1937 fprintf(f, "%-8s %-8s %-8s %s\n",
1938 "start", "end", "size", "prot");
1939 start = -1;
1940 end = -1;
1941 prot = 0;
1942 for(i = 0; i <= L1_SIZE; i++) {
1943 if (i < L1_SIZE)
1944 p = l1_map[i];
1945 else
1946 p = NULL;
1947 for(j = 0;j < L2_SIZE; j++) {
1948 if (!p)
1949 prot1 = 0;
1950 else
1951 prot1 = p[j].flags;
1952 if (prot1 != prot) {
1953 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1954 if (start != -1) {
1955 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1956 start, end, end - start,
9fa3e853
FB
1957 prot & PAGE_READ ? 'r' : '-',
1958 prot & PAGE_WRITE ? 'w' : '-',
1959 prot & PAGE_EXEC ? 'x' : '-');
1960 }
1961 if (prot1 != 0)
1962 start = end;
1963 else
1964 start = -1;
1965 prot = prot1;
1966 }
1967 if (!p)
1968 break;
1969 }
33417e70 1970 }
33417e70
FB
1971}
1972
53a5960a 1973int page_get_flags(target_ulong address)
33417e70 1974{
9fa3e853
FB
1975 PageDesc *p;
1976
1977 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1978 if (!p)
9fa3e853
FB
1979 return 0;
1980 return p->flags;
1981}
1982
1983/* modify the flags of a page and invalidate the code if
1984 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1985 depending on PAGE_WRITE */
53a5960a 1986void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1987{
1988 PageDesc *p;
53a5960a 1989 target_ulong addr;
9fa3e853 1990
c8a706fe 1991 /* mmap_lock should already be held. */
9fa3e853
FB
1992 start = start & TARGET_PAGE_MASK;
1993 end = TARGET_PAGE_ALIGN(end);
1994 if (flags & PAGE_WRITE)
1995 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
1996 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1997 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
1998 /* We may be called for host regions that are outside guest
1999 address space. */
2000 if (!p)
2001 return;
9fa3e853
FB
2002 /* if the write protection is set, then we invalidate the code
2003 inside */
5fafdf24 2004 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2005 (flags & PAGE_WRITE) &&
2006 p->first_tb) {
d720b93d 2007 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2008 }
2009 p->flags = flags;
2010 }
33417e70
FB
2011}
2012
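/* Illustrative sketch, not part of the original file: user-mode mmap
   emulation records a freshly created guest mapping so that later
   page_get_flags()/page_check_range() calls know about it.  The function
   name is invented; 'prot' is a PAGE_READ/PAGE_WRITE/PAGE_EXEC mask. */
static void example_note_new_mapping(target_ulong start, target_ulong len,
                                     int prot)
{
    page_set_flags(start, start + len, prot | PAGE_VALID);
}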
3d97b40b
TS
2013int page_check_range(target_ulong start, target_ulong len, int flags)
2014{
2015 PageDesc *p;
2016 target_ulong end;
2017 target_ulong addr;
2018
2019 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2020 start = start & TARGET_PAGE_MASK;
2021
2022 if( end < start )
2023 /* we've wrapped around */
2024 return -1;
2025 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2026 p = page_find(addr >> TARGET_PAGE_BITS);
2027 if( !p )
2028 return -1;
2029 if( !(p->flags & PAGE_VALID) )
2030 return -1;
2031
dae3270c 2032 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2033 return -1;
dae3270c
FB
2034 if (flags & PAGE_WRITE) {
2035 if (!(p->flags & PAGE_WRITE_ORG))
2036 return -1;
2037 /* unprotect the page if it was put read-only because it
2038 contains translated code */
2039 if (!(p->flags & PAGE_WRITE)) {
2040 if (!page_unprotect(addr, 0, NULL))
2041 return -1;
2042 }
2043 return 0;
2044 }
3d97b40b
TS
2045 }
2046 return 0;
2047}
2048
9fa3e853
FB
2049/* called from signal handler: invalidate the code and unprotect the
2050 page. Return TRUE if the fault was successfully handled. */
53a5960a 2051int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2052{
2053 unsigned int page_index, prot, pindex;
2054 PageDesc *p, *p1;
53a5960a 2055 target_ulong host_start, host_end, addr;
9fa3e853 2056
c8a706fe
PB
2057 /* Technically this isn't safe inside a signal handler. However we
2058 know this only ever happens in a synchronous SEGV handler, so in
2059 practice it seems to be ok. */
2060 mmap_lock();
2061
83fb7adf 2062 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2063 page_index = host_start >> TARGET_PAGE_BITS;
2064 p1 = page_find(page_index);
c8a706fe
PB
2065 if (!p1) {
2066 mmap_unlock();
9fa3e853 2067 return 0;
c8a706fe 2068 }
83fb7adf 2069 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2070 p = p1;
2071 prot = 0;
2072 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2073 prot |= p->flags;
2074 p++;
2075 }
2076 /* if the page was really writable, then we change its
2077 protection back to writable */
2078 if (prot & PAGE_WRITE_ORG) {
2079 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2080 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2081 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2082 (prot & PAGE_BITS) | PAGE_WRITE);
2083 p1[pindex].flags |= PAGE_WRITE;
2084 /* and since the content will be modified, we must invalidate
2085 the corresponding translated code. */
d720b93d 2086 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2087#ifdef DEBUG_TB_CHECK
2088 tb_invalidate_check(address);
2089#endif
c8a706fe 2090 mmap_unlock();
9fa3e853
FB
2091 return 1;
2092 }
2093 }
c8a706fe 2094 mmap_unlock();
9fa3e853
FB
2095 return 0;
2096}
2097
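/* Illustrative sketch, not part of the original file: the user-mode
   SIGSEGV path gives page_unprotect() the first chance to handle a write
   fault on a page that was made read-only because it contains translated
   code.  'host_addr', 'pc' and 'puc' stand for values the real signal
   handler extracts from the signal context; h2g() is assumed to be the
   usual host-to-guest address conversion. */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        return 1;   /* handled: the faulting access can simply be retried */
    }
    return 0;       /* genuine fault: deliver a SIGSEGV to the guest */
}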
6a00d601
FB
2098static inline void tlb_set_dirty(CPUState *env,
2099 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2100{
2101}
9fa3e853
FB
2102#endif /* defined(CONFIG_USER_ONLY) */
2103
e2eef170 2104#if !defined(CONFIG_USER_ONLY)
db7b5426 2105static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2106 ram_addr_t memory);
2107static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2108 ram_addr_t orig_memory);
db7b5426
BS
2109#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2110 need_subpage) \
2111 do { \
2112 if (addr > start_addr) \
2113 start_addr2 = 0; \
2114 else { \
2115 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2116 if (start_addr2 > 0) \
2117 need_subpage = 1; \
2118 } \
2119 \
49e9fba2 2120 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2121 end_addr2 = TARGET_PAGE_SIZE - 1; \
2122 else { \
2123 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2124 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2125 need_subpage = 1; \
2126 } \
2127 } while (0)
2128
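/* Worked example, illustration only (assuming 4 KB target pages): mapping
   orig_size = 0x800 bytes of device registers at start_addr = 0x10000800
   gives, for the page containing that address, start_addr2 = 0x800 and
   end_addr2 = 0xfff, so need_subpage is set and subpage_register() routes
   only offsets 0x800-0xfff of the page to the new handlers while the rest
   of the page keeps its previous mapping. */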
33417e70
FB
2129/* register physical memory. 'size' must be a multiple of the target
2130 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2131 io memory page */
5fafdf24 2132void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2133 ram_addr_t size,
2134 ram_addr_t phys_offset)
33417e70 2135{
108c49b8 2136 target_phys_addr_t addr, end_addr;
92e873b9 2137 PhysPageDesc *p;
9d42037b 2138 CPUState *env;
00f82b8a 2139 ram_addr_t orig_size = size;
db7b5426 2140 void *subpage;
33417e70 2141
da260249
FB
2142#ifdef USE_KQEMU
2143 /* XXX: should not depend on cpu context */
2144 env = first_cpu;
2145 if (env->kqemu_enabled) {
2146 kqemu_set_phys_mem(start_addr, size, phys_offset);
2147 }
2148#endif
5fd386f6 2149 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2150 end_addr = start_addr + (target_phys_addr_t)size;
2151 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2152 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2153 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2154 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2155 target_phys_addr_t start_addr2, end_addr2;
2156 int need_subpage = 0;
2157
2158 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2159 need_subpage);
4254fab8 2160 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2161 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2162 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2163 &p->phys_offset, orig_memory);
2164 } else {
2165 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2166 >> IO_MEM_SHIFT];
2167 }
2168 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2169 } else {
2170 p->phys_offset = phys_offset;
2171 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2172 (phys_offset & IO_MEM_ROMD))
2173 phys_offset += TARGET_PAGE_SIZE;
2174 }
2175 } else {
2176 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2177 p->phys_offset = phys_offset;
2178 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2179 (phys_offset & IO_MEM_ROMD))
2180 phys_offset += TARGET_PAGE_SIZE;
2181 else {
2182 target_phys_addr_t start_addr2, end_addr2;
2183 int need_subpage = 0;
2184
2185 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2186 end_addr2, need_subpage);
2187
4254fab8 2188 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2189 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2190 &p->phys_offset, IO_MEM_UNASSIGNED);
2191 subpage_register(subpage, start_addr2, end_addr2,
2192 phys_offset);
2193 }
2194 }
2195 }
33417e70 2196 }
3b46e624 2197
9d42037b
FB
2198 /* since each CPU stores ram addresses in its TLB cache, we must
2199 reset the modified entries */
2200 /* XXX: slow ! */
2201 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2202 tlb_flush(env, 1);
2203 }
33417e70
FB
2204}
2205
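/* Illustrative sketch, not part of the original file: how board code of
   this era typically put RAM and a boot ROM into the guest physical
   address map.  The base addresses, sizes and the function name are made
   up for the example. */
static void example_board_memory_init(void)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(64 * 1024);
    cpu_register_physical_memory(0xfff00000, 64 * 1024,
                                 rom_offset | IO_MEM_ROM);
}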
ba863458 2206/* XXX: temporary until new memory mapping API */
00f82b8a 2207ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2208{
2209 PhysPageDesc *p;
2210
2211 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2212 if (!p)
2213 return IO_MEM_UNASSIGNED;
2214 return p->phys_offset;
2215}
2216
e9a1ab19 2217/* XXX: better than nothing */
00f82b8a 2218ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2219{
2220 ram_addr_t addr;
7fb4fdcf 2221 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2222 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2223 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2224 abort();
2225 }
2226 addr = phys_ram_alloc_offset;
2227 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2228 return addr;
2229}
2230
2231void qemu_ram_free(ram_addr_t addr)
2232{
2233}
2234
a4193c8a 2235static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2236{
67d3b957 2237#ifdef DEBUG_UNASSIGNED
ab3d1727 2238 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2239#endif
2240#ifdef TARGET_SPARC
6c36d3fa 2241 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2242#elif TARGET_CRIS
2243 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2244#endif
33417e70
FB
2245 return 0;
2246}
2247
a4193c8a 2248static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2249{
67d3b957 2250#ifdef DEBUG_UNASSIGNED
ab3d1727 2251 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2252#endif
b4f0a316 2253#ifdef TARGET_SPARC
6c36d3fa 2254 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2255#elif TARGET_CRIS
2256 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2257#endif
33417e70
FB
2258}
2259
2260static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2261 unassigned_mem_readb,
2262 unassigned_mem_readb,
2263 unassigned_mem_readb,
2264};
2265
2266static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2267 unassigned_mem_writeb,
2268 unassigned_mem_writeb,
2269 unassigned_mem_writeb,
2270};
2271
0f459d16
PB
2272static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2273 uint32_t val)
9fa3e853 2274{
3a7d929e 2275 int dirty_flags;
3a7d929e
FB
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2277 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2278#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2279 tb_invalidate_phys_page_fast(ram_addr, 1);
2280 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2281#endif
3a7d929e 2282 }
0f459d16 2283 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2284#ifdef USE_KQEMU
2285 if (cpu_single_env->kqemu_enabled &&
2286 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2287 kqemu_modify_page(cpu_single_env, ram_addr);
2288#endif
f23db169
FB
2289 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2290 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2291 /* we remove the notdirty callback only if the code has been
2292 flushed */
2293 if (dirty_flags == 0xff)
2e70f6ef 2294 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2295}
2296
0f459d16
PB
2297static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2298 uint32_t val)
9fa3e853 2299{
3a7d929e 2300 int dirty_flags;
3a7d929e
FB
2301 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2302 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2303#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2304 tb_invalidate_phys_page_fast(ram_addr, 2);
2305 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2306#endif
3a7d929e 2307 }
0f459d16 2308 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2309#ifdef USE_KQEMU
2310 if (cpu_single_env->kqemu_enabled &&
2311 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2312 kqemu_modify_page(cpu_single_env, ram_addr);
2313#endif
f23db169
FB
2314 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2315 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2316 /* we remove the notdirty callback only if the code has been
2317 flushed */
2318 if (dirty_flags == 0xff)
2e70f6ef 2319 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2320}
2321
0f459d16
PB
2322static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2323 uint32_t val)
9fa3e853 2324{
3a7d929e 2325 int dirty_flags;
3a7d929e
FB
2326 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2327 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2328#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2329 tb_invalidate_phys_page_fast(ram_addr, 4);
2330 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2331#endif
3a7d929e 2332 }
0f459d16 2333 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2334#ifdef USE_KQEMU
2335 if (cpu_single_env->kqemu_enabled &&
2336 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2337 kqemu_modify_page(cpu_single_env, ram_addr);
2338#endif
f23db169
FB
2339 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2340 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2341 /* we remove the notdirty callback only if the code has been
2342 flushed */
2343 if (dirty_flags == 0xff)
2e70f6ef 2344 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2345}
2346
3a7d929e 2347static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2348 NULL, /* never used */
2349 NULL, /* never used */
2350 NULL, /* never used */
2351};
2352
1ccde1cb
FB
2353static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2354 notdirty_mem_writeb,
2355 notdirty_mem_writew,
2356 notdirty_mem_writel,
2357};
2358
0f459d16
PB
2359/* Generate a debug exception if a watchpoint has been hit. */
2360static void check_watchpoint(int offset, int flags)
2361{
2362 CPUState *env = cpu_single_env;
2363 target_ulong vaddr;
2364 int i;
2365
2e70f6ef 2366 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
0f459d16
PB
2367 for (i = 0; i < env->nb_watchpoints; i++) {
2368 if (vaddr == env->watchpoint[i].vaddr
2369 && (env->watchpoint[i].type & flags)) {
2370 env->watchpoint_hit = i + 1;
2371 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2372 break;
2373 }
2374 }
2375}
2376
6658ffb8
PB
2377/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2378 so these check for a hit then pass through to the normal out-of-line
2379 phys routines. */
2380static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2381{
0f459d16 2382 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2383 return ldub_phys(addr);
2384}
2385
2386static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2387{
0f459d16 2388 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2389 return lduw_phys(addr);
2390}
2391
2392static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2393{
0f459d16 2394 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2395 return ldl_phys(addr);
2396}
2397
6658ffb8
PB
2398static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2399 uint32_t val)
2400{
0f459d16 2401 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2402 stb_phys(addr, val);
2403}
2404
2405static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2406 uint32_t val)
2407{
0f459d16 2408 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2409 stw_phys(addr, val);
2410}
2411
2412static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2413 uint32_t val)
2414{
0f459d16 2415 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2416 stl_phys(addr, val);
2417}
2418
2419static CPUReadMemoryFunc *watch_mem_read[3] = {
2420 watch_mem_readb,
2421 watch_mem_readw,
2422 watch_mem_readl,
2423};
2424
2425static CPUWriteMemoryFunc *watch_mem_write[3] = {
2426 watch_mem_writeb,
2427 watch_mem_writew,
2428 watch_mem_writel,
2429};
6658ffb8 2430
db7b5426
BS
2431static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2432 unsigned int len)
2433{
db7b5426
BS
2434 uint32_t ret;
2435 unsigned int idx;
2436
2437 idx = SUBPAGE_IDX(addr - mmio->base);
2438#if defined(DEBUG_SUBPAGE)
2439 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2440 mmio, len, addr, idx);
2441#endif
3ee89922 2442 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2443
2444 return ret;
2445}
2446
2447static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2448 uint32_t value, unsigned int len)
2449{
db7b5426
BS
2450 unsigned int idx;
2451
2452 idx = SUBPAGE_IDX(addr - mmio->base);
2453#if defined(DEBUG_SUBPAGE)
2454 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2455 mmio, len, addr, idx, value);
2456#endif
3ee89922 2457 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2458}
2459
2460static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2461{
2462#if defined(DEBUG_SUBPAGE)
2463 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2464#endif
2465
2466 return subpage_readlen(opaque, addr, 0);
2467}
2468
2469static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2470 uint32_t value)
2471{
2472#if defined(DEBUG_SUBPAGE)
2473 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2474#endif
2475 subpage_writelen(opaque, addr, value, 0);
2476}
2477
2478static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2479{
2480#if defined(DEBUG_SUBPAGE)
2481 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2482#endif
2483
2484 return subpage_readlen(opaque, addr, 1);
2485}
2486
2487static void subpage_writew (void *opaque, target_phys_addr_t addr,
2488 uint32_t value)
2489{
2490#if defined(DEBUG_SUBPAGE)
2491 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2492#endif
2493 subpage_writelen(opaque, addr, value, 1);
2494}
2495
2496static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2497{
2498#if defined(DEBUG_SUBPAGE)
2499 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2500#endif
2501
2502 return subpage_readlen(opaque, addr, 2);
2503}
2504
2505static void subpage_writel (void *opaque,
2506 target_phys_addr_t addr, uint32_t value)
2507{
2508#if defined(DEBUG_SUBPAGE)
2509 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2510#endif
2511 subpage_writelen(opaque, addr, value, 2);
2512}
2513
2514static CPUReadMemoryFunc *subpage_read[] = {
2515 &subpage_readb,
2516 &subpage_readw,
2517 &subpage_readl,
2518};
2519
2520static CPUWriteMemoryFunc *subpage_write[] = {
2521 &subpage_writeb,
2522 &subpage_writew,
2523 &subpage_writel,
2524};
2525
2526static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2527 ram_addr_t memory)
db7b5426
BS
2528{
2529 int idx, eidx;
4254fab8 2530 unsigned int i;
db7b5426
BS
2531
2532 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2533 return -1;
2534 idx = SUBPAGE_IDX(start);
2535 eidx = SUBPAGE_IDX(end);
2536#if defined(DEBUG_SUBPAGE)
2537 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2538 mmio, start, end, idx, eidx, memory);
2539#endif
2540 memory >>= IO_MEM_SHIFT;
2541 for (; idx <= eidx; idx++) {
4254fab8 2542 for (i = 0; i < 4; i++) {
3ee89922
BS
2543 if (io_mem_read[memory][i]) {
2544 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2545 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2546 }
2547 if (io_mem_write[memory][i]) {
2548 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2549 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2550 }
4254fab8 2551 }
db7b5426
BS
2552 }
2553
2554 return 0;
2555}
2556
00f82b8a
AJ
2557static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2558 ram_addr_t orig_memory)
db7b5426
BS
2559{
2560 subpage_t *mmio;
2561 int subpage_memory;
2562
2563 mmio = qemu_mallocz(sizeof(subpage_t));
2564 if (mmio != NULL) {
2565 mmio->base = base;
2566 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2567#if defined(DEBUG_SUBPAGE)
2568 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2569 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2570#endif
2571 *phys = subpage_memory | IO_MEM_SUBPAGE;
2572 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2573 }
2574
2575 return mmio;
2576}
2577
33417e70
FB
2578static void io_mem_init(void)
2579{
3a7d929e 2580 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2581 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2582 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2583 io_mem_nb = 5;
2584
0f459d16 2585 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2586 watch_mem_write, NULL);
1ccde1cb 2587 /* alloc dirty bits array */
0a962c02 2588 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2589 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2590}
2591
2592/* mem_read and mem_write are arrays of functions containing the
2593 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2594 2). Functions can be omitted with a NULL function pointer. The
2595 registered functions may be modified dynamically later.
2596 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2597 modified. If it is zero, a new io zone is allocated. The return
2598 value can be used with cpu_register_physical_memory(). (-1) is
2599 returned on error. */
33417e70
FB
2600int cpu_register_io_memory(int io_index,
2601 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2602 CPUWriteMemoryFunc **mem_write,
2603 void *opaque)
33417e70 2604{
4254fab8 2605 int i, subwidth = 0;
33417e70
FB
2606
2607 if (io_index <= 0) {
b5ff1b31 2608 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2609 return -1;
2610 io_index = io_mem_nb++;
2611 } else {
2612 if (io_index >= IO_MEM_NB_ENTRIES)
2613 return -1;
2614 }
b5ff1b31 2615
33417e70 2616 for(i = 0;i < 3; i++) {
4254fab8
BS
2617 if (!mem_read[i] || !mem_write[i])
2618 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2619 io_mem_read[io_index][i] = mem_read[i];
2620 io_mem_write[io_index][i] = mem_write[i];
2621 }
a4193c8a 2622 io_mem_opaque[io_index] = opaque;
4254fab8 2623 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2624}
61382a50 2625
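/* Illustrative sketch, not part of the original file: a minimal device with
   one 32-bit register, wired up the way device models of this era use
   cpu_register_io_memory() and cpu_register_physical_memory().  All
   "example_" names are invented; the NULL entries rely on the handling of
   omitted access sizes described in the comment above. */
static uint32_t example_reg;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return example_reg;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    example_reg = val;
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL,                 /* no 8 bit access */
    NULL,                 /* no 16 bit access */
    example_mmio_readl,   /* 32 bit access */
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL,
    NULL,
    example_mmio_writel,
};

static void example_mmio_map(target_phys_addr_t base)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, example_mmio_read,
                                       example_mmio_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}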
8926b517
FB
2626CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2627{
2628 return io_mem_write[io_index >> IO_MEM_SHIFT];
2629}
2630
2631CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2632{
2633 return io_mem_read[io_index >> IO_MEM_SHIFT];
2634}
2635
e2eef170
PB
2636#endif /* !defined(CONFIG_USER_ONLY) */
2637
13eb76e0
FB
2638/* physical memory access (slow version, mainly for debug) */
2639#if defined(CONFIG_USER_ONLY)
5fafdf24 2640void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2641 int len, int is_write)
2642{
2643 int l, flags;
2644 target_ulong page;
53a5960a 2645 void * p;
13eb76e0
FB
2646
2647 while (len > 0) {
2648 page = addr & TARGET_PAGE_MASK;
2649 l = (page + TARGET_PAGE_SIZE) - addr;
2650 if (l > len)
2651 l = len;
2652 flags = page_get_flags(page);
2653 if (!(flags & PAGE_VALID))
2654 return;
2655 if (is_write) {
2656 if (!(flags & PAGE_WRITE))
2657 return;
579a97f7 2658 /* XXX: this code should not depend on lock_user */
72fb7daa 2659 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2660 /* FIXME - should this return an error rather than just fail? */
2661 return;
72fb7daa
AJ
2662 memcpy(p, buf, l);
2663 unlock_user(p, addr, l);
13eb76e0
FB
2664 } else {
2665 if (!(flags & PAGE_READ))
2666 return;
579a97f7 2667 /* XXX: this code should not depend on lock_user */
72fb7daa 2668 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2669 /* FIXME - should this return an error rather than just fail? */
2670 return;
72fb7daa 2671 memcpy(buf, p, l);
5b257578 2672 unlock_user(p, addr, 0);
13eb76e0
FB
2673 }
2674 len -= l;
2675 buf += l;
2676 addr += l;
2677 }
2678}
8df1cd07 2679
13eb76e0 2680#else
5fafdf24 2681void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2682 int len, int is_write)
2683{
2684 int l, io_index;
2685 uint8_t *ptr;
2686 uint32_t val;
2e12669a
FB
2687 target_phys_addr_t page;
2688 unsigned long pd;
92e873b9 2689 PhysPageDesc *p;
3b46e624 2690
13eb76e0
FB
2691 while (len > 0) {
2692 page = addr & TARGET_PAGE_MASK;
2693 l = (page + TARGET_PAGE_SIZE) - addr;
2694 if (l > len)
2695 l = len;
92e873b9 2696 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2697 if (!p) {
2698 pd = IO_MEM_UNASSIGNED;
2699 } else {
2700 pd = p->phys_offset;
2701 }
3b46e624 2702
13eb76e0 2703 if (is_write) {
3a7d929e 2704 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2705 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2706 /* XXX: could force cpu_single_env to NULL to avoid
2707 potential bugs */
13eb76e0 2708 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2709 /* 32 bit write access */
c27004ec 2710 val = ldl_p(buf);
a4193c8a 2711 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2712 l = 4;
2713 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2714 /* 16 bit write access */
c27004ec 2715 val = lduw_p(buf);
a4193c8a 2716 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2717 l = 2;
2718 } else {
1c213d19 2719 /* 8 bit write access */
c27004ec 2720 val = ldub_p(buf);
a4193c8a 2721 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2722 l = 1;
2723 }
2724 } else {
b448f2f3
FB
2725 unsigned long addr1;
2726 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2727 /* RAM case */
b448f2f3 2728 ptr = phys_ram_base + addr1;
13eb76e0 2729 memcpy(ptr, buf, l);
3a7d929e
FB
2730 if (!cpu_physical_memory_is_dirty(addr1)) {
2731 /* invalidate code */
2732 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2733 /* set dirty bit */
5fafdf24 2734 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2735 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2736 }
13eb76e0
FB
2737 }
2738 } else {
5fafdf24 2739 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2740 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2741 /* I/O case */
2742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2743 if (l >= 4 && ((addr & 3) == 0)) {
2744 /* 32 bit read access */
a4193c8a 2745 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2746 stl_p(buf, val);
13eb76e0
FB
2747 l = 4;
2748 } else if (l >= 2 && ((addr & 1) == 0)) {
2749 /* 16 bit read access */
a4193c8a 2750 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2751 stw_p(buf, val);
13eb76e0
FB
2752 l = 2;
2753 } else {
1c213d19 2754 /* 8 bit read access */
a4193c8a 2755 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2756 stb_p(buf, val);
13eb76e0
FB
2757 l = 1;
2758 }
2759 } else {
2760 /* RAM case */
5fafdf24 2761 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2762 (addr & ~TARGET_PAGE_MASK);
2763 memcpy(buf, ptr, l);
2764 }
2765 }
2766 len -= l;
2767 buf += l;
2768 addr += l;
2769 }
2770}
8df1cd07 2771
d0ecd2aa 2772/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2773void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2774 const uint8_t *buf, int len)
2775{
2776 int l;
2777 uint8_t *ptr;
2778 target_phys_addr_t page;
2779 unsigned long pd;
2780 PhysPageDesc *p;
3b46e624 2781
d0ecd2aa
FB
2782 while (len > 0) {
2783 page = addr & TARGET_PAGE_MASK;
2784 l = (page + TARGET_PAGE_SIZE) - addr;
2785 if (l > len)
2786 l = len;
2787 p = phys_page_find(page >> TARGET_PAGE_BITS);
2788 if (!p) {
2789 pd = IO_MEM_UNASSIGNED;
2790 } else {
2791 pd = p->phys_offset;
2792 }
3b46e624 2793
d0ecd2aa 2794 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2795 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2796 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2797 /* do nothing */
2798 } else {
2799 unsigned long addr1;
2800 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2801 /* ROM/RAM case */
2802 ptr = phys_ram_base + addr1;
2803 memcpy(ptr, buf, l);
2804 }
2805 len -= l;
2806 buf += l;
2807 addr += l;
2808 }
2809}
2810
2811
8df1cd07
FB
2812/* warning: addr must be aligned */
2813uint32_t ldl_phys(target_phys_addr_t addr)
2814{
2815 int io_index;
2816 uint8_t *ptr;
2817 uint32_t val;
2818 unsigned long pd;
2819 PhysPageDesc *p;
2820
2821 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2822 if (!p) {
2823 pd = IO_MEM_UNASSIGNED;
2824 } else {
2825 pd = p->phys_offset;
2826 }
3b46e624 2827
5fafdf24 2828 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2829 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2830 /* I/O case */
2831 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2832 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2833 } else {
2834 /* RAM case */
5fafdf24 2835 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2836 (addr & ~TARGET_PAGE_MASK);
2837 val = ldl_p(ptr);
2838 }
2839 return val;
2840}
2841
84b7b8e7
FB
2842/* warning: addr must be aligned */
2843uint64_t ldq_phys(target_phys_addr_t addr)
2844{
2845 int io_index;
2846 uint8_t *ptr;
2847 uint64_t val;
2848 unsigned long pd;
2849 PhysPageDesc *p;
2850
2851 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2852 if (!p) {
2853 pd = IO_MEM_UNASSIGNED;
2854 } else {
2855 pd = p->phys_offset;
2856 }
3b46e624 2857
2a4188a3
FB
2858 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2859 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2860 /* I/O case */
2861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2862#ifdef TARGET_WORDS_BIGENDIAN
2863 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2864 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2865#else
2866 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2867 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2868#endif
2869 } else {
2870 /* RAM case */
5fafdf24 2871 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2872 (addr & ~TARGET_PAGE_MASK);
2873 val = ldq_p(ptr);
2874 }
2875 return val;
2876}
2877
aab33094
FB
2878/* XXX: optimize */
2879uint32_t ldub_phys(target_phys_addr_t addr)
2880{
2881 uint8_t val;
2882 cpu_physical_memory_read(addr, &val, 1);
2883 return val;
2884}
2885
2886/* XXX: optimize */
2887uint32_t lduw_phys(target_phys_addr_t addr)
2888{
2889 uint16_t val;
2890 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2891 return tswap16(val);
2892}
2893
8df1cd07
FB
2894/* warning: addr must be aligned. The ram page is not masked as dirty
2895 and the code inside is not invalidated. It is useful if the dirty
2896 bits are used to track modified PTEs */
2897void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2898{
2899 int io_index;
2900 uint8_t *ptr;
2901 unsigned long pd;
2902 PhysPageDesc *p;
2903
2904 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2905 if (!p) {
2906 pd = IO_MEM_UNASSIGNED;
2907 } else {
2908 pd = p->phys_offset;
2909 }
3b46e624 2910
3a7d929e 2911 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2912 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2913 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2914 } else {
5fafdf24 2915 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2916 (addr & ~TARGET_PAGE_MASK);
2917 stl_p(ptr, val);
2918 }
2919}
2920
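/* Illustrative sketch, not part of the original file: a software page-table
   walk in a target MMU sets the accessed bit of a guest PTE with the
   _notdirty variant, so the write neither marks the page dirty nor
   invalidates translated code in it, as discussed in the comment above.
   EXAMPLE_PTE_A and the function name are invented for the sketch. */
#define EXAMPLE_PTE_A 0x20

static void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_A)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_A);
    }
}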
bc98a7ef
JM
2921void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2922{
2923 int io_index;
2924 uint8_t *ptr;
2925 unsigned long pd;
2926 PhysPageDesc *p;
2927
2928 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2929 if (!p) {
2930 pd = IO_MEM_UNASSIGNED;
2931 } else {
2932 pd = p->phys_offset;
2933 }
3b46e624 2934
bc98a7ef
JM
2935 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2936 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2937#ifdef TARGET_WORDS_BIGENDIAN
2938 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2939 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2940#else
2941 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2942 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2943#endif
2944 } else {
5fafdf24 2945 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2946 (addr & ~TARGET_PAGE_MASK);
2947 stq_p(ptr, val);
2948 }
2949}
2950
8df1cd07 2951/* warning: addr must be aligned */
8df1cd07
FB
2952void stl_phys(target_phys_addr_t addr, uint32_t val)
2953{
2954 int io_index;
2955 uint8_t *ptr;
2956 unsigned long pd;
2957 PhysPageDesc *p;
2958
2959 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2960 if (!p) {
2961 pd = IO_MEM_UNASSIGNED;
2962 } else {
2963 pd = p->phys_offset;
2964 }
3b46e624 2965
3a7d929e 2966 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2967 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2968 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2969 } else {
2970 unsigned long addr1;
2971 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2972 /* RAM case */
2973 ptr = phys_ram_base + addr1;
2974 stl_p(ptr, val);
3a7d929e
FB
2975 if (!cpu_physical_memory_is_dirty(addr1)) {
2976 /* invalidate code */
2977 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2978 /* set dirty bit */
f23db169
FB
2979 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2980 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2981 }
8df1cd07
FB
2982 }
2983}
2984
aab33094
FB
2985/* XXX: optimize */
2986void stb_phys(target_phys_addr_t addr, uint32_t val)
2987{
2988 uint8_t v = val;
2989 cpu_physical_memory_write(addr, &v, 1);
2990}
2991
2992/* XXX: optimize */
2993void stw_phys(target_phys_addr_t addr, uint32_t val)
2994{
2995 uint16_t v = tswap16(val);
2996 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2997}
2998
2999/* XXX: optimize */
3000void stq_phys(target_phys_addr_t addr, uint64_t val)
3001{
3002 val = tswap64(val);
3003 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3004}
3005
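/* Illustrative sketch, not part of the original file: a DMA-capable device
   model reads a three-word descriptor (source, destination, length) out of
   guest memory, copies the data with the physical-memory helpers, and
   clears the length word to signal completion.  The descriptor layout and
   the function name are invented for the example. */
static void example_dma_run(target_phys_addr_t desc_paddr)
{
    uint32_t src = ldl_phys(desc_paddr);
    uint32_t dst = ldl_phys(desc_paddr + 4);
    uint32_t len = ldl_phys(desc_paddr + 8);
    uint8_t buf[TARGET_PAGE_SIZE];

    while (len > 0) {
        uint32_t l = len < sizeof(buf) ? len : (uint32_t)sizeof(buf);

        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
    stl_phys(desc_paddr + 8, 0);    /* length 0 tells the guest it is done */
}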
13eb76e0
FB
3006#endif
3007
3008/* virtual memory access for debug */
5fafdf24 3009int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3010 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3011{
3012 int l;
9b3c35e0
JM
3013 target_phys_addr_t phys_addr;
3014 target_ulong page;
13eb76e0
FB
3015
3016 while (len > 0) {
3017 page = addr & TARGET_PAGE_MASK;
3018 phys_addr = cpu_get_phys_page_debug(env, page);
3019 /* if no physical page mapped, return an error */
3020 if (phys_addr == -1)
3021 return -1;
3022 l = (page + TARGET_PAGE_SIZE) - addr;
3023 if (l > len)
3024 l = len;
5fafdf24 3025 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3026 buf, l, is_write);
13eb76e0
FB
3027 len -= l;
3028 buf += l;
3029 addr += l;
3030 }
3031 return 0;
3032}
3033
2e70f6ef
PB
3034/* in deterministic execution mode, instructions doing device I/Os
3035 must be at the end of the TB */
3036void cpu_io_recompile(CPUState *env, void *retaddr)
3037{
3038 TranslationBlock *tb;
3039 uint32_t n, cflags;
3040 target_ulong pc, cs_base;
3041 uint64_t flags;
3042
3043 tb = tb_find_pc((unsigned long)retaddr);
3044 if (!tb) {
3045 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3046 retaddr);
3047 }
3048 n = env->icount_decr.u16.low + tb->icount;
3049 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3050 /* Calculate how many instructions had been executed before the fault
bf20dc07 3051 occurred. */
2e70f6ef
PB
3052 n = n - env->icount_decr.u16.low;
3053 /* Generate a new TB ending on the I/O insn. */
3054 n++;
3055 /* On MIPS and SH, delay slot instructions can only be restarted if
3056 they were already the first instruction in the TB. If this is not
bf20dc07 3057 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3058 branch. */
3059#if defined(TARGET_MIPS)
3060 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3061 env->active_tc.PC -= 4;
3062 env->icount_decr.u16.low++;
3063 env->hflags &= ~MIPS_HFLAG_BMASK;
3064 }
3065#elif defined(TARGET_SH4)
3066 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3067 && n > 1) {
3068 env->pc -= 2;
3069 env->icount_decr.u16.low++;
3070 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3071 }
3072#endif
3073 /* This should never happen. */
3074 if (n > CF_COUNT_MASK)
3075 cpu_abort(env, "TB too big during recompile");
3076
3077 cflags = n | CF_LAST_IO;
3078 pc = tb->pc;
3079 cs_base = tb->cs_base;
3080 flags = tb->flags;
3081 tb_phys_invalidate(tb, -1);
3082 /* FIXME: In theory this could raise an exception. In practice
3083 we have already translated the block once so it's probably ok. */
3084 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3085 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3086 the first in the TB) then we end up generating a whole new TB and
3087 repeating the fault, which is horribly inefficient.
3088 Better would be to execute just this insn uncached, or generate a
3089 second new TB. */
3090 cpu_resume_from_signal(env, NULL);
3091}
3092
e3db7226
FB
3093void dump_exec_info(FILE *f,
3094 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3095{
3096 int i, target_code_size, max_target_code_size;
3097 int direct_jmp_count, direct_jmp2_count, cross_page;
3098 TranslationBlock *tb;
3b46e624 3099
e3db7226
FB
3100 target_code_size = 0;
3101 max_target_code_size = 0;
3102 cross_page = 0;
3103 direct_jmp_count = 0;
3104 direct_jmp2_count = 0;
3105 for(i = 0; i < nb_tbs; i++) {
3106 tb = &tbs[i];
3107 target_code_size += tb->size;
3108 if (tb->size > max_target_code_size)
3109 max_target_code_size = tb->size;
3110 if (tb->page_addr[1] != -1)
3111 cross_page++;
3112 if (tb->tb_next_offset[0] != 0xffff) {
3113 direct_jmp_count++;
3114 if (tb->tb_next_offset[1] != 0xffff) {
3115 direct_jmp2_count++;
3116 }
3117 }
3118 }
3119 /* XXX: avoid using doubles ? */
57fec1fe 3120 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3121 cpu_fprintf(f, "gen code size %ld/%ld\n",
3122 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3123 cpu_fprintf(f, "TB count %d/%d\n",
3124 nb_tbs, code_gen_max_blocks);
5fafdf24 3125 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3126 nb_tbs ? target_code_size / nb_tbs : 0,
3127 max_target_code_size);
5fafdf24 3128 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3129 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3130 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3131 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3132 cross_page,
e3db7226
FB
3133 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3134 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3135 direct_jmp_count,
e3db7226
FB
3136 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3137 direct_jmp2_count,
3138 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3139 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3140 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3141 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3142 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3143 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3144}
3145
5fafdf24 3146#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3147
3148#define MMUSUFFIX _cmmu
3149#define GETPC() NULL
3150#define env cpu_single_env
b769d8fe 3151#define SOFTMMU_CODE_ACCESS
61382a50
FB
3152
3153#define SHIFT 0
3154#include "softmmu_template.h"
3155
3156#define SHIFT 1
3157#include "softmmu_template.h"
3158
3159#define SHIFT 2
3160#include "softmmu_template.h"
3161
3162#define SHIFT 3
3163#include "softmmu_template.h"
3164
3165#undef env
3166
3167#endif