54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
53a5960a
PB
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
43#endif
54936004 44
fd6ce8f6 45//#define DEBUG_TB_INVALIDATE
66e85a21 46//#define DEBUG_FLUSH
9fa3e853 47//#define DEBUG_TLB
67d3b957 48//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
49
50/* make various TB consistency checks */
5fafdf24
TS
51//#define DEBUG_TB_CHECK
52//#define DEBUG_TLB_CHECK
fd6ce8f6 53
1196be37 54//#define DEBUG_IOPORT
db7b5426 55//#define DEBUG_SUBPAGE
1196be37 56
99773bd4
PB
57#if !defined(CONFIG_USER_ONLY)
58/* TB consistency checks only implemented for usermode emulation. */
59#undef DEBUG_TB_CHECK
60#endif
61
9fa3e853
FB
62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
108c49b8
FB
67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
76#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_I386) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
80#else
81/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82#define TARGET_PHYS_ADDR_SPACE_BITS 32
83#endif
84
fab94c0e 85TranslationBlock *tbs;
26a5f13b 86int code_gen_max_blocks;
9fa3e853 87TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 88int nb_tbs;
eb51d102
FB
89/* any access to the tbs or the page table must use this lock */
90spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 91
7cb69cae 92uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
26a5f13b
FB
93uint8_t *code_gen_buffer;
94unsigned long code_gen_buffer_size;
95/* threshold to flush the translated code buffer */
96unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
97uint8_t *code_gen_ptr;
98
e2eef170 99#if !defined(CONFIG_USER_ONLY)
00f82b8a 100ram_addr_t phys_ram_size;
9fa3e853
FB
101int phys_ram_fd;
102uint8_t *phys_ram_base;
1ccde1cb 103uint8_t *phys_ram_dirty;
e9a1ab19 104static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 105#endif
9fa3e853 106
6a00d601
FB
107CPUState *first_cpu;
108/* current CPU in the current thread. It is only valid inside
109 cpu_exec() */
5fafdf24 110CPUState *cpu_single_env;
2e70f6ef
PB
111/* 0 = Do not count executed instructions.
112 1 = Precise instruction counting.
113 2 = Adaptive rate instruction counting. */
114int use_icount = 0;
115/* Current instruction counter. While executing translated code this may
116 include some instructions that have not yet been executed. */
117int64_t qemu_icount;
6a00d601 118
54936004 119typedef struct PageDesc {
92e873b9 120 /* list of TBs intersecting this ram page */
fd6ce8f6 121 TranslationBlock *first_tb;
9fa3e853
FB
122 /* in order to optimize self-modifying code handling, we count the number
123 of write accesses to a given page so that a code bitmap can be built once a threshold is reached */
124 unsigned int code_write_count;
125 uint8_t *code_bitmap;
126#if defined(CONFIG_USER_ONLY)
127 unsigned long flags;
128#endif
54936004
FB
129} PageDesc;
130
92e873b9 131typedef struct PhysPageDesc {
0f459d16 132 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 133 ram_addr_t phys_offset;
92e873b9
FB
134} PhysPageDesc;
135
54936004 136#define L2_BITS 10
bedb69ea
JM
137#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138/* XXX: this is a temporary hack for alpha target.
139 * In the future, this is to be replaced by a multi-level table
140 * to actually be able to handle the complete 64-bit address space.
141 */
142#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
143#else
03875444 144#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 145#endif
54936004
FB
146
147#define L1_SIZE (1 << L1_BITS)
148#define L2_SIZE (1 << L2_BITS)
149
83fb7adf
FB
150unsigned long qemu_real_host_page_size;
151unsigned long qemu_host_page_bits;
152unsigned long qemu_host_page_size;
153unsigned long qemu_host_page_mask;
54936004 154
92e873b9 155/* XXX: for system emulation, it could just be an array */
54936004 156static PageDesc *l1_map[L1_SIZE];
0a962c02 157PhysPageDesc **l1_phys_map;
54936004 158
e2eef170
PB
159#if !defined(CONFIG_USER_ONLY)
160static void io_mem_init(void);
161
33417e70 162/* io memory support */
33417e70
FB
163CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
164CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 165void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 166static int io_mem_nb;
6658ffb8
PB
167static int io_mem_watch;
168#endif
33417e70 169
34865134
FB
170/* log support */
171char *logfilename = "/tmp/qemu.log";
172FILE *logfile;
173int loglevel;
e735b91c 174static int log_append = 0;
34865134 175
e3db7226
FB
176/* statistics */
177static int tlb_flush_count;
178static int tb_flush_count;
179static int tb_phys_invalidate_count;
180
db7b5426
BS
181#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182typedef struct subpage_t {
183 target_phys_addr_t base;
3ee89922
BS
184 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
185 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
186 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
187} subpage_t;
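/* A subpage_t describes a target page that is not backed by a single memory
   region: for each byte offset within the page (SUBPAGE_IDX) it records the
   read/write handler tables and opaque pointers to dispatch to.  The last
   index presumably selects the access-size slot, as in io_mem_read/write. */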
188
7cb69cae
FB
189#ifdef _WIN32
190static void map_exec(void *addr, long size)
191{
192 DWORD old_protect;
193 VirtualProtect(addr, size,
194 PAGE_EXECUTE_READWRITE, &old_protect);
195
196}
197#else
198static void map_exec(void *addr, long size)
199{
4369415f 200 unsigned long start, end, page_size;
7cb69cae 201
4369415f 202 page_size = getpagesize();
7cb69cae 203 start = (unsigned long)addr;
4369415f 204 start &= ~(page_size - 1);
7cb69cae
FB
205
206 end = (unsigned long)addr + size;
4369415f
FB
207 end += page_size - 1;
208 end &= ~(page_size - 1);
7cb69cae
FB
209
210 mprotect((void *)start, end - start,
211 PROT_READ | PROT_WRITE | PROT_EXEC);
212}
213#endif
214
b346ff46 215static void page_init(void)
54936004 216{
83fb7adf 217 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 218 TARGET_PAGE_SIZE */
67b915a5 219#ifdef _WIN32
d5a8f07c
FB
220 {
221 SYSTEM_INFO system_info;
222 DWORD old_protect;
3b46e624 223
d5a8f07c
FB
224 GetSystemInfo(&system_info);
225 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 226 }
67b915a5 227#else
83fb7adf 228 qemu_real_host_page_size = getpagesize();
67b915a5 229#endif
83fb7adf
FB
230 if (qemu_host_page_size == 0)
231 qemu_host_page_size = qemu_real_host_page_size;
232 if (qemu_host_page_size < TARGET_PAGE_SIZE)
233 qemu_host_page_size = TARGET_PAGE_SIZE;
234 qemu_host_page_bits = 0;
235 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
236 qemu_host_page_bits++;
237 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
238 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
239 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
240
241#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
242 {
243 long long startaddr, endaddr;
244 FILE *f;
245 int n;
246
c8a706fe 247 mmap_lock();
0776590d 248 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
249 f = fopen("/proc/self/maps", "r");
250 if (f) {
251 do {
252 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
253 if (n == 2) {
e0b8d65a
BS
254 startaddr = MIN(startaddr,
255 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
256 endaddr = MIN(endaddr,
257 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 258 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
259 TARGET_PAGE_ALIGN(endaddr),
260 PAGE_RESERVED);
261 }
262 } while (!feof(f));
263 fclose(f);
264 }
c8a706fe 265 mmap_unlock();
50a9569b
AZ
266 }
267#endif
54936004
FB
268}
269
00f82b8a 270static inline PageDesc *page_find_alloc(target_ulong index)
54936004 271{
54936004
FB
272 PageDesc **lp, *p;
273
17e2377a
PB
274#if TARGET_LONG_BITS > 32
275 /* Host memory outside guest VM. For 32-bit targets we have already
276 excluded high addresses. */
277 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
278 return NULL;
279#endif
54936004
FB
280 lp = &l1_map[index >> L2_BITS];
281 p = *lp;
282 if (!p) {
283 /* allocate if not found */
17e2377a
PB
284#if defined(CONFIG_USER_ONLY)
285 unsigned long addr;
286 size_t len = sizeof(PageDesc) * L2_SIZE;
287 /* Don't use qemu_malloc because it may recurse. */
288 p = mmap(0, len, PROT_READ | PROT_WRITE,
289 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 290 *lp = p;
17e2377a
PB
291 addr = h2g(p);
292 if (addr == (target_ulong)addr) {
293 page_set_flags(addr & TARGET_PAGE_MASK,
294 TARGET_PAGE_ALIGN(addr + len),
295 PAGE_RESERVED);
296 }
297#else
298 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
299 *lp = p;
300#endif
54936004
FB
301 }
302 return p + (index & (L2_SIZE - 1));
303}
304
00f82b8a 305static inline PageDesc *page_find(target_ulong index)
54936004 306{
54936004
FB
307 PageDesc *p;
308
54936004
FB
309 p = l1_map[index >> L2_BITS];
310 if (!p)
311 return 0;
fd6ce8f6
FB
312 return p + (index & (L2_SIZE - 1));
313}
314
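/* The physical page table mirrors l1_map, but is keyed by guest physical
   page index: when TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra top level
   (indexed by the bits above L1_BITS + L2_BITS) is chained in front, and the
   leaves are arrays of L2_SIZE PhysPageDesc entries whose phys_offset is
   initialised to IO_MEM_UNASSIGNED. */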
108c49b8 315static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 316{
108c49b8 317 void **lp, **p;
e3f4e2a4 318 PhysPageDesc *pd;
92e873b9 319
108c49b8
FB
320 p = (void **)l1_phys_map;
321#if TARGET_PHYS_ADDR_SPACE_BITS > 32
322
323#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
324#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
325#endif
326 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
327 p = *lp;
328 if (!p) {
329 /* allocate if not found */
108c49b8
FB
330 if (!alloc)
331 return NULL;
332 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
333 memset(p, 0, sizeof(void *) * L1_SIZE);
334 *lp = p;
335 }
336#endif
337 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
338 pd = *lp;
339 if (!pd) {
340 int i;
108c49b8
FB
341 /* allocate if not found */
342 if (!alloc)
343 return NULL;
e3f4e2a4
PB
344 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
345 *lp = pd;
346 for (i = 0; i < L2_SIZE; i++)
347 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 348 }
e3f4e2a4 349 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
350}
351
108c49b8 352static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 353{
108c49b8 354 return phys_page_find_alloc(index, 0);
92e873b9
FB
355}
356
9fa3e853 357#if !defined(CONFIG_USER_ONLY)
6a00d601 358static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 359static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 360 target_ulong vaddr);
c8a706fe
PB
361#define mmap_lock() do { } while(0)
362#define mmap_unlock() do { } while(0)
9fa3e853 363#endif
fd6ce8f6 364
4369415f
FB
365#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
366
367#if defined(CONFIG_USER_ONLY)
368/* Currently it is not recommended to allocate big chunks of data in
369 user mode. This will change once a dedicated libc is used */
370#define USE_STATIC_CODE_GEN_BUFFER
371#endif
372
373#ifdef USE_STATIC_CODE_GEN_BUFFER
374static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
375#endif
376
26a5f13b
FB
377void code_gen_alloc(unsigned long tb_size)
378{
4369415f
FB
379#ifdef USE_STATIC_CODE_GEN_BUFFER
380 code_gen_buffer = static_code_gen_buffer;
381 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
382 map_exec(code_gen_buffer, code_gen_buffer_size);
383#else
26a5f13b
FB
384 code_gen_buffer_size = tb_size;
385 if (code_gen_buffer_size == 0) {
4369415f
FB
386#if defined(CONFIG_USER_ONLY)
387 /* in user mode, phys_ram_size is not meaningful */
388 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
389#else
26a5f13b
FB
390 /* XXX: needs adjustments */
391 code_gen_buffer_size = (int)(phys_ram_size / 4);
4369415f 392#endif
26a5f13b
FB
393 }
394 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
395 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
396 /* The code gen buffer location may have constraints depending on
397 the host cpu and OS */
398#if defined(__linux__)
399 {
400 int flags;
401 flags = MAP_PRIVATE | MAP_ANONYMOUS;
402#if defined(__x86_64__)
403 flags |= MAP_32BIT;
404 /* Cannot map more than that */
405 if (code_gen_buffer_size > (800 * 1024 * 1024))
406 code_gen_buffer_size = (800 * 1024 * 1024);
407#endif
408 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
409 PROT_WRITE | PROT_READ | PROT_EXEC,
410 flags, -1, 0);
411 if (code_gen_buffer == MAP_FAILED) {
412 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
413 exit(1);
414 }
415 }
416#else
417 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
418 if (!code_gen_buffer) {
419 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
420 exit(1);
421 }
422 map_exec(code_gen_buffer, code_gen_buffer_size);
423#endif
4369415f 424#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
425 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
426 code_gen_buffer_max_size = code_gen_buffer_size -
427 code_gen_max_block_size();
428 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
429 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
430}
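/* code_gen_buffer_max_size is the buffer size minus the worst-case size of a
   single translated block, so tb_alloc() can check for buffer overflow before
   translating rather than after. */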
431
432/* Must be called before using the QEMU cpus. 'tb_size' is the size
433 (in bytes) allocated to the translation buffer. Zero means default
434 size. */
435void cpu_exec_init_all(unsigned long tb_size)
436{
26a5f13b
FB
437 cpu_gen_init();
438 code_gen_alloc(tb_size);
439 code_gen_ptr = code_gen_buffer;
4369415f 440 page_init();
e2eef170 441#if !defined(CONFIG_USER_ONLY)
26a5f13b 442 io_mem_init();
e2eef170 443#endif
26a5f13b
FB
444}
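/* Illustrative start-up sequence (cpu_init() is target-specific and lives
   outside this file):
       cpu_exec_init_all(0);        // code buffer, page tables, io mem
       env = cpu_init(cpu_model);   // ends up calling cpu_exec_init(env) below
   cpu_exec_init() chains the new CPUState onto first_cpu and, for system
   emulation, registers the cpu_save/cpu_load handlers with savevm. */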
445
6a00d601 446void cpu_exec_init(CPUState *env)
fd6ce8f6 447{
6a00d601
FB
448 CPUState **penv;
449 int cpu_index;
450
6a00d601
FB
451 env->next_cpu = NULL;
452 penv = &first_cpu;
453 cpu_index = 0;
454 while (*penv != NULL) {
455 penv = (CPUState **)&(*penv)->next_cpu;
456 cpu_index++;
457 }
458 env->cpu_index = cpu_index;
6658ffb8 459 env->nb_watchpoints = 0;
6a00d601 460 *penv = env;
b3c7724c
PB
461#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
462 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
463 cpu_save, cpu_load, env);
464#endif
fd6ce8f6
FB
465}
466
9fa3e853
FB
467static inline void invalidate_page_bitmap(PageDesc *p)
468{
469 if (p->code_bitmap) {
59817ccb 470 qemu_free(p->code_bitmap);
9fa3e853
FB
471 p->code_bitmap = NULL;
472 }
473 p->code_write_count = 0;
474}
475
fd6ce8f6
FB
476/* set to NULL all the 'first_tb' fields in all PageDescs */
477static void page_flush_tb(void)
478{
479 int i, j;
480 PageDesc *p;
481
482 for(i = 0; i < L1_SIZE; i++) {
483 p = l1_map[i];
484 if (p) {
9fa3e853
FB
485 for(j = 0; j < L2_SIZE; j++) {
486 p->first_tb = NULL;
487 invalidate_page_bitmap(p);
488 p++;
489 }
fd6ce8f6
FB
490 }
491 }
492}
493
494/* flush all the translation blocks */
d4e8164f 495/* XXX: tb_flush is currently not thread safe */
6a00d601 496void tb_flush(CPUState *env1)
fd6ce8f6 497{
6a00d601 498 CPUState *env;
0124311e 499#if defined(DEBUG_FLUSH)
ab3d1727
BS
500 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
501 (unsigned long)(code_gen_ptr - code_gen_buffer),
502 nb_tbs, nb_tbs > 0 ?
503 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 504#endif
26a5f13b 505 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
506 cpu_abort(env1, "Internal error: code buffer overflow\n");
507
fd6ce8f6 508 nb_tbs = 0;
3b46e624 509
6a00d601
FB
510 for(env = first_cpu; env != NULL; env = env->next_cpu) {
511 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
512 }
9fa3e853 513
8a8a608f 514 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 515 page_flush_tb();
9fa3e853 516
fd6ce8f6 517 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
518 /* XXX: flush processor icache at this point if cache flush is
519 expensive */
e3db7226 520 tb_flush_count++;
fd6ce8f6
FB
521}
522
523#ifdef DEBUG_TB_CHECK
524
bc98a7ef 525static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
526{
527 TranslationBlock *tb;
528 int i;
529 address &= TARGET_PAGE_MASK;
99773bd4
PB
530 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
531 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
532 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
533 address >= tb->pc + tb->size)) {
534 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 535 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
536 }
537 }
538 }
539}
540
541/* verify that all the pages have correct rights for code */
542static void tb_page_check(void)
543{
544 TranslationBlock *tb;
545 int i, flags1, flags2;
3b46e624 546
99773bd4
PB
547 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
548 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
549 flags1 = page_get_flags(tb->pc);
550 flags2 = page_get_flags(tb->pc + tb->size - 1);
551 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
552 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 553 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
554 }
555 }
556 }
557}
558
d4e8164f
FB
559void tb_jmp_check(TranslationBlock *tb)
560{
561 TranslationBlock *tb1;
562 unsigned int n1;
563
564 /* suppress any remaining jumps to this TB */
565 tb1 = tb->jmp_first;
566 for(;;) {
567 n1 = (long)tb1 & 3;
568 tb1 = (TranslationBlock *)((long)tb1 & ~3);
569 if (n1 == 2)
570 break;
571 tb1 = tb1->jmp_next[n1];
572 }
573 /* check end of list */
574 if (tb1 != tb) {
575 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
576 }
577}
578
fd6ce8f6
FB
579#endif
580
581/* invalidate one TB */
582static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
583 int next_offset)
584{
585 TranslationBlock *tb1;
586 for(;;) {
587 tb1 = *ptb;
588 if (tb1 == tb) {
589 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
590 break;
591 }
592 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
593 }
594}
595
9fa3e853
FB
596static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
597{
598 TranslationBlock *tb1;
599 unsigned int n1;
600
601 for(;;) {
602 tb1 = *ptb;
603 n1 = (long)tb1 & 3;
604 tb1 = (TranslationBlock *)((long)tb1 & ~3);
605 if (tb1 == tb) {
606 *ptb = tb1->page_next[n1];
607 break;
608 }
609 ptb = &tb1->page_next[n1];
610 }
611}
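/* Both the per-page TB lists (first_tb/page_next) and the jump lists
   (jmp_first/jmp_next) tag the two low bits of their pointers: in page lists
   the tag says which of the TB's (at most) two pages the link belongs to; in
   jump lists it says which outgoing jump slot of that TB points back here,
   and tag 2 marks the owning TB that closes the circular list (see
   tb_link_phys() and tb_phys_invalidate()). */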
612
d4e8164f
FB
613static inline void tb_jmp_remove(TranslationBlock *tb, int n)
614{
615 TranslationBlock *tb1, **ptb;
616 unsigned int n1;
617
618 ptb = &tb->jmp_next[n];
619 tb1 = *ptb;
620 if (tb1) {
621 /* find tb(n) in circular list */
622 for(;;) {
623 tb1 = *ptb;
624 n1 = (long)tb1 & 3;
625 tb1 = (TranslationBlock *)((long)tb1 & ~3);
626 if (n1 == n && tb1 == tb)
627 break;
628 if (n1 == 2) {
629 ptb = &tb1->jmp_first;
630 } else {
631 ptb = &tb1->jmp_next[n1];
632 }
633 }
634 /* now we can suppress tb(n) from the list */
635 *ptb = tb->jmp_next[n];
636
637 tb->jmp_next[n] = NULL;
638 }
639}
640
641/* reset the jump entry 'n' of a TB so that it is not chained to
642 another TB */
643static inline void tb_reset_jump(TranslationBlock *tb, int n)
644{
645 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
646}
647
2e70f6ef 648void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 649{
6a00d601 650 CPUState *env;
8a40a180 651 PageDesc *p;
d4e8164f 652 unsigned int h, n1;
00f82b8a 653 target_phys_addr_t phys_pc;
8a40a180 654 TranslationBlock *tb1, *tb2;
3b46e624 655
8a40a180
FB
656 /* remove the TB from the hash list */
657 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
658 h = tb_phys_hash_func(phys_pc);
5fafdf24 659 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
660 offsetof(TranslationBlock, phys_hash_next));
661
662 /* remove the TB from the page list */
663 if (tb->page_addr[0] != page_addr) {
664 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
665 tb_page_remove(&p->first_tb, tb);
666 invalidate_page_bitmap(p);
667 }
668 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
669 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
670 tb_page_remove(&p->first_tb, tb);
671 invalidate_page_bitmap(p);
672 }
673
36bdbe54 674 tb_invalidated_flag = 1;
59817ccb 675
fd6ce8f6 676 /* remove the TB from the hash list */
8a40a180 677 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
678 for(env = first_cpu; env != NULL; env = env->next_cpu) {
679 if (env->tb_jmp_cache[h] == tb)
680 env->tb_jmp_cache[h] = NULL;
681 }
d4e8164f
FB
682
683 /* suppress this TB from the two jump lists */
684 tb_jmp_remove(tb, 0);
685 tb_jmp_remove(tb, 1);
686
687 /* suppress any remaining jumps to this TB */
688 tb1 = tb->jmp_first;
689 for(;;) {
690 n1 = (long)tb1 & 3;
691 if (n1 == 2)
692 break;
693 tb1 = (TranslationBlock *)((long)tb1 & ~3);
694 tb2 = tb1->jmp_next[n1];
695 tb_reset_jump(tb1, n1);
696 tb1->jmp_next[n1] = NULL;
697 tb1 = tb2;
698 }
699 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 700
e3db7226 701 tb_phys_invalidate_count++;
9fa3e853
FB
702}
703
704static inline void set_bits(uint8_t *tab, int start, int len)
705{
706 int end, mask, end1;
707
708 end = start + len;
709 tab += start >> 3;
710 mask = 0xff << (start & 7);
711 if ((start & ~7) == (end & ~7)) {
712 if (start < end) {
713 mask &= ~(0xff << (end & 7));
714 *tab |= mask;
715 }
716 } else {
717 *tab++ |= mask;
718 start = (start + 8) & ~7;
719 end1 = end & ~7;
720 while (start < end1) {
721 *tab++ = 0xff;
722 start += 8;
723 }
724 if (start < end) {
725 mask = ~(0xff << (end & 7));
726 *tab |= mask;
727 }
728 }
729}
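/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. the top five bits of
   tab[0] and the low two bits of tab[1].  build_page_bitmap() below uses it
   to record which bytes of a page are covered by translated code. */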
730
731static void build_page_bitmap(PageDesc *p)
732{
733 int n, tb_start, tb_end;
734 TranslationBlock *tb;
3b46e624 735
b2a7081a 736 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
737 if (!p->code_bitmap)
738 return;
9fa3e853
FB
739
740 tb = p->first_tb;
741 while (tb != NULL) {
742 n = (long)tb & 3;
743 tb = (TranslationBlock *)((long)tb & ~3);
744 /* NOTE: this is subtle as a TB may span two physical pages */
745 if (n == 0) {
746 /* NOTE: tb_end may be after the end of the page, but
747 it is not a problem */
748 tb_start = tb->pc & ~TARGET_PAGE_MASK;
749 tb_end = tb_start + tb->size;
750 if (tb_end > TARGET_PAGE_SIZE)
751 tb_end = TARGET_PAGE_SIZE;
752 } else {
753 tb_start = 0;
754 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
755 }
756 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
757 tb = tb->page_next[n];
758 }
759}
760
2e70f6ef
PB
761TranslationBlock *tb_gen_code(CPUState *env,
762 target_ulong pc, target_ulong cs_base,
763 int flags, int cflags)
d720b93d
FB
764{
765 TranslationBlock *tb;
766 uint8_t *tc_ptr;
767 target_ulong phys_pc, phys_page2, virt_page2;
768 int code_gen_size;
769
c27004ec
FB
770 phys_pc = get_phys_addr_code(env, pc);
771 tb = tb_alloc(pc);
d720b93d
FB
772 if (!tb) {
773 /* flush must be done */
774 tb_flush(env);
775 /* cannot fail at this point */
c27004ec 776 tb = tb_alloc(pc);
2e70f6ef
PB
777 /* Don't forget to invalidate previous TB info. */
778 tb_invalidated_flag = 1;
d720b93d
FB
779 }
780 tc_ptr = code_gen_ptr;
781 tb->tc_ptr = tc_ptr;
782 tb->cs_base = cs_base;
783 tb->flags = flags;
784 tb->cflags = cflags;
d07bde88 785 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 786 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 787
d720b93d 788 /* check next page if needed */
c27004ec 789 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 790 phys_page2 = -1;
c27004ec 791 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
792 phys_page2 = get_phys_addr_code(env, virt_page2);
793 }
794 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 795 return tb;
d720b93d 796}
3b46e624 797
9fa3e853
FB
798/* invalidate all TBs which intersect with the target physical page
799 starting in the range [start, end). NOTE: start and end must refer to
d720b93d
FB
800 the same physical page. 'is_cpu_write_access' should be true if called
801 from a real cpu write access: the virtual CPU will exit the current
802 TB if code is modified inside this TB. */
00f82b8a 803void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
804 int is_cpu_write_access)
805{
806 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 807 CPUState *env = cpu_single_env;
9fa3e853 808 PageDesc *p;
ea1c1802 809 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 810 target_ulong tb_start, tb_end;
d720b93d 811 target_ulong current_pc, current_cs_base;
9fa3e853
FB
812
813 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 814 if (!p)
9fa3e853 815 return;
5fafdf24 816 if (!p->code_bitmap &&
d720b93d
FB
817 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
818 is_cpu_write_access) {
9fa3e853
FB
819 /* build code bitmap */
820 build_page_bitmap(p);
821 }
822
823 /* we remove all the TBs in the range [start, end[ */
824 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
825 current_tb_not_found = is_cpu_write_access;
826 current_tb_modified = 0;
827 current_tb = NULL; /* avoid warning */
828 current_pc = 0; /* avoid warning */
829 current_cs_base = 0; /* avoid warning */
830 current_flags = 0; /* avoid warning */
9fa3e853
FB
831 tb = p->first_tb;
832 while (tb != NULL) {
833 n = (long)tb & 3;
834 tb = (TranslationBlock *)((long)tb & ~3);
835 tb_next = tb->page_next[n];
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
841 tb_end = tb_start + tb->size;
842 } else {
843 tb_start = tb->page_addr[1];
844 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
845 }
846 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
847#ifdef TARGET_HAS_PRECISE_SMC
848 if (current_tb_not_found) {
849 current_tb_not_found = 0;
850 current_tb = NULL;
2e70f6ef 851 if (env->mem_io_pc) {
d720b93d 852 /* now we have a real cpu fault */
2e70f6ef 853 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
854 }
855 }
856 if (current_tb == tb &&
2e70f6ef 857 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
858 /* If we are modifying the current TB, we must stop
859 its execution. We could be more precise by checking
860 that the modification is after the current PC, but it
861 would require a specialized function to partially
862 restore the CPU state */
3b46e624 863
d720b93d 864 current_tb_modified = 1;
5fafdf24 865 cpu_restore_state(current_tb, env,
2e70f6ef 866 env->mem_io_pc, NULL);
d720b93d
FB
867#if defined(TARGET_I386)
868 current_flags = env->hflags;
869 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
870 current_cs_base = (target_ulong)env->segs[R_CS].base;
871 current_pc = current_cs_base + env->eip;
872#else
873#error unsupported CPU
874#endif
875 }
876#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
877 /* we need to do that to handle the case where a signal
878 occurs while doing tb_phys_invalidate() */
879 saved_tb = NULL;
880 if (env) {
881 saved_tb = env->current_tb;
882 env->current_tb = NULL;
883 }
9fa3e853 884 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
885 if (env) {
886 env->current_tb = saved_tb;
887 if (env->interrupt_request && env->current_tb)
888 cpu_interrupt(env, env->interrupt_request);
889 }
9fa3e853
FB
890 }
891 tb = tb_next;
892 }
893#if !defined(CONFIG_USER_ONLY)
894 /* if no code remaining, no need to continue to use slow writes */
895 if (!p->first_tb) {
896 invalidate_page_bitmap(p);
d720b93d 897 if (is_cpu_write_access) {
2e70f6ef 898 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
899 }
900 }
901#endif
902#ifdef TARGET_HAS_PRECISE_SMC
903 if (current_tb_modified) {
904 /* we generate a block containing just the instruction
905 modifying the memory. It will ensure that it cannot modify
906 itself */
ea1c1802 907 env->current_tb = NULL;
2e70f6ef 908 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 909 cpu_resume_from_signal(env, NULL);
9fa3e853 910 }
fd6ce8f6 911#endif
9fa3e853 912}
fd6ce8f6 913
9fa3e853 914/* len must be <= 8 and start must be a multiple of len */
00f82b8a 915static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
916{
917 PageDesc *p;
918 int offset, b;
59817ccb 919#if 0
a4193c8a
FB
920 if (1) {
921 if (loglevel) {
5fafdf24 922 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 923 cpu_single_env->mem_io_vaddr, len,
5fafdf24 924 cpu_single_env->eip,
a4193c8a
FB
925 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
926 }
59817ccb
FB
927 }
928#endif
9fa3e853 929 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 930 if (!p)
9fa3e853
FB
931 return;
932 if (p->code_bitmap) {
933 offset = start & ~TARGET_PAGE_MASK;
934 b = p->code_bitmap[offset >> 3] >> (offset & 7);
935 if (b & ((1 << len) - 1))
936 goto do_invalidate;
937 } else {
938 do_invalidate:
d720b93d 939 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
940 }
941}
942
9fa3e853 943#if !defined(CONFIG_SOFTMMU)
00f82b8a 944static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 945 unsigned long pc, void *puc)
9fa3e853 946{
d720b93d
FB
947 int n, current_flags, current_tb_modified;
948 target_ulong current_pc, current_cs_base;
9fa3e853 949 PageDesc *p;
d720b93d
FB
950 TranslationBlock *tb, *current_tb;
951#ifdef TARGET_HAS_PRECISE_SMC
952 CPUState *env = cpu_single_env;
953#endif
9fa3e853
FB
954
955 addr &= TARGET_PAGE_MASK;
956 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 957 if (!p)
9fa3e853
FB
958 return;
959 tb = p->first_tb;
d720b93d
FB
960 current_tb_modified = 0;
961 current_tb = NULL;
962 current_pc = 0; /* avoid warning */
963 current_cs_base = 0; /* avoid warning */
964 current_flags = 0; /* avoid warning */
965#ifdef TARGET_HAS_PRECISE_SMC
966 if (tb && pc != 0) {
967 current_tb = tb_find_pc(pc);
968 }
969#endif
9fa3e853
FB
970 while (tb != NULL) {
971 n = (long)tb & 3;
972 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
973#ifdef TARGET_HAS_PRECISE_SMC
974 if (current_tb == tb &&
2e70f6ef 975 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
976 /* If we are modifying the current TB, we must stop
977 its execution. We could be more precise by checking
978 that the modification is after the current PC, but it
979 would require a specialized function to partially
980 restore the CPU state */
3b46e624 981
d720b93d
FB
982 current_tb_modified = 1;
983 cpu_restore_state(current_tb, env, pc, puc);
984#if defined(TARGET_I386)
985 current_flags = env->hflags;
986 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
987 current_cs_base = (target_ulong)env->segs[R_CS].base;
988 current_pc = current_cs_base + env->eip;
989#else
990#error unsupported CPU
991#endif
992 }
993#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
994 tb_phys_invalidate(tb, addr);
995 tb = tb->page_next[n];
996 }
fd6ce8f6 997 p->first_tb = NULL;
d720b93d
FB
998#ifdef TARGET_HAS_PRECISE_SMC
999 if (current_tb_modified) {
1000 /* we generate a block containing just the instruction
1001 modifying the memory. It will ensure that it cannot modify
1002 itself */
ea1c1802 1003 env->current_tb = NULL;
2e70f6ef 1004 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1005 cpu_resume_from_signal(env, puc);
1006 }
1007#endif
fd6ce8f6 1008}
9fa3e853 1009#endif
fd6ce8f6
FB
1010
1011/* add the tb in the target page and protect it if necessary */
5fafdf24 1012static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1013 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1014{
1015 PageDesc *p;
9fa3e853
FB
1016 TranslationBlock *last_first_tb;
1017
1018 tb->page_addr[n] = page_addr;
3a7d929e 1019 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1020 tb->page_next[n] = p->first_tb;
1021 last_first_tb = p->first_tb;
1022 p->first_tb = (TranslationBlock *)((long)tb | n);
1023 invalidate_page_bitmap(p);
fd6ce8f6 1024
107db443 1025#if defined(TARGET_HAS_SMC) || 1
d720b93d 1026
9fa3e853 1027#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1028 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1029 target_ulong addr;
1030 PageDesc *p2;
9fa3e853
FB
1031 int prot;
1032
fd6ce8f6
FB
1033 /* force the host page as non writable (writes will have a
1034 page fault + mprotect overhead) */
53a5960a 1035 page_addr &= qemu_host_page_mask;
fd6ce8f6 1036 prot = 0;
53a5960a
PB
1037 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1038 addr += TARGET_PAGE_SIZE) {
1039
1040 p2 = page_find (addr >> TARGET_PAGE_BITS);
1041 if (!p2)
1042 continue;
1043 prot |= p2->flags;
1044 p2->flags &= ~PAGE_WRITE;
1045 page_get_flags(addr);
1046 }
5fafdf24 1047 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1048 (prot & PAGE_BITS) & ~PAGE_WRITE);
1049#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1050 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1051 page_addr);
fd6ce8f6 1052#endif
fd6ce8f6 1053 }
9fa3e853
FB
1054#else
1055 /* if some code is already present, then the pages are already
1056 protected. So we handle the case where only the first TB is
1057 allocated in a physical page */
1058 if (!last_first_tb) {
6a00d601 1059 tlb_protect_code(page_addr);
9fa3e853
FB
1060 }
1061#endif
d720b93d
FB
1062
1063#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1064}
1065
1066/* Allocate a new translation block. Flush the translation buffer if
1067 too many translation blocks or too much generated code. */
c27004ec 1068TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1069{
1070 TranslationBlock *tb;
fd6ce8f6 1071
26a5f13b
FB
1072 if (nb_tbs >= code_gen_max_blocks ||
1073 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1074 return NULL;
fd6ce8f6
FB
1075 tb = &tbs[nb_tbs++];
1076 tb->pc = pc;
b448f2f3 1077 tb->cflags = 0;
d4e8164f
FB
1078 return tb;
1079}
1080
2e70f6ef
PB
1081void tb_free(TranslationBlock *tb)
1082{
1083 /* In practice this is mostly used for single-use temporary TBs.
1084 Ignore the hard cases and just back up if this TB happens to
1085 be the last one generated. */
1086 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1087 code_gen_ptr = tb->tc_ptr;
1088 nb_tbs--;
1089 }
1090}
1091
9fa3e853
FB
1092/* add a new TB and link it to the physical page tables. phys_page2 is
1093 (-1) to indicate that only one page contains the TB. */
5fafdf24 1094void tb_link_phys(TranslationBlock *tb,
9fa3e853 1095 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1096{
9fa3e853
FB
1097 unsigned int h;
1098 TranslationBlock **ptb;
1099
c8a706fe
PB
1100 /* Grab the mmap lock to stop another thread invalidating this TB
1101 before we are done. */
1102 mmap_lock();
9fa3e853
FB
1103 /* add in the physical hash table */
1104 h = tb_phys_hash_func(phys_pc);
1105 ptb = &tb_phys_hash[h];
1106 tb->phys_hash_next = *ptb;
1107 *ptb = tb;
fd6ce8f6
FB
1108
1109 /* add in the page list */
9fa3e853
FB
1110 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1111 if (phys_page2 != -1)
1112 tb_alloc_page(tb, 1, phys_page2);
1113 else
1114 tb->page_addr[1] = -1;
9fa3e853 1115
d4e8164f
FB
1116 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1117 tb->jmp_next[0] = NULL;
1118 tb->jmp_next[1] = NULL;
1119
1120 /* init original jump addresses */
1121 if (tb->tb_next_offset[0] != 0xffff)
1122 tb_reset_jump(tb, 0);
1123 if (tb->tb_next_offset[1] != 0xffff)
1124 tb_reset_jump(tb, 1);
8a40a180
FB
1125
1126#ifdef DEBUG_TB_CHECK
1127 tb_page_check();
1128#endif
c8a706fe 1129 mmap_unlock();
fd6ce8f6
FB
1130}
1131
9fa3e853
FB
1132/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1133 tb[1].tc_ptr. Return NULL if not found */
1134TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1135{
9fa3e853
FB
1136 int m_min, m_max, m;
1137 unsigned long v;
1138 TranslationBlock *tb;
a513fe19
FB
1139
1140 if (nb_tbs <= 0)
1141 return NULL;
1142 if (tc_ptr < (unsigned long)code_gen_buffer ||
1143 tc_ptr >= (unsigned long)code_gen_ptr)
1144 return NULL;
1145 /* binary search (cf Knuth) */
1146 m_min = 0;
1147 m_max = nb_tbs - 1;
1148 while (m_min <= m_max) {
1149 m = (m_min + m_max) >> 1;
1150 tb = &tbs[m];
1151 v = (unsigned long)tb->tc_ptr;
1152 if (v == tc_ptr)
1153 return tb;
1154 else if (tc_ptr < v) {
1155 m_max = m - 1;
1156 } else {
1157 m_min = m + 1;
1158 }
5fafdf24 1159 }
a513fe19
FB
1160 return &tbs[m_max];
1161}
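/* When tc_ptr falls inside a block rather than on its first byte, the search
   ends with m_max at the last TB whose start is below tc_ptr, which is the
   block containing that host PC. */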
7501267e 1162
ea041c0e
FB
1163static void tb_reset_jump_recursive(TranslationBlock *tb);
1164
1165static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1166{
1167 TranslationBlock *tb1, *tb_next, **ptb;
1168 unsigned int n1;
1169
1170 tb1 = tb->jmp_next[n];
1171 if (tb1 != NULL) {
1172 /* find head of list */
1173 for(;;) {
1174 n1 = (long)tb1 & 3;
1175 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1176 if (n1 == 2)
1177 break;
1178 tb1 = tb1->jmp_next[n1];
1179 }
1180 /* we are now sure that tb jumps to tb1 */
1181 tb_next = tb1;
1182
1183 /* remove tb from the jmp_first list */
1184 ptb = &tb_next->jmp_first;
1185 for(;;) {
1186 tb1 = *ptb;
1187 n1 = (long)tb1 & 3;
1188 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1189 if (n1 == n && tb1 == tb)
1190 break;
1191 ptb = &tb1->jmp_next[n1];
1192 }
1193 *ptb = tb->jmp_next[n];
1194 tb->jmp_next[n] = NULL;
3b46e624 1195
ea041c0e
FB
1196 /* suppress the jump to next tb in generated code */
1197 tb_reset_jump(tb, n);
1198
0124311e 1199 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1200 tb_reset_jump_recursive(tb_next);
1201 }
1202}
1203
1204static void tb_reset_jump_recursive(TranslationBlock *tb)
1205{
1206 tb_reset_jump_recursive2(tb, 0);
1207 tb_reset_jump_recursive2(tb, 1);
1208}
1209
1fddef4b 1210#if defined(TARGET_HAS_ICE)
d720b93d
FB
1211static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1212{
9b3c35e0
JM
1213 target_phys_addr_t addr;
1214 target_ulong pd;
c2f07f81
PB
1215 ram_addr_t ram_addr;
1216 PhysPageDesc *p;
d720b93d 1217
c2f07f81
PB
1218 addr = cpu_get_phys_page_debug(env, pc);
1219 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1220 if (!p) {
1221 pd = IO_MEM_UNASSIGNED;
1222 } else {
1223 pd = p->phys_offset;
1224 }
1225 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1226 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1227}
c27004ec 1228#endif
d720b93d 1229
6658ffb8 1230/* Add a watchpoint. */
0f459d16 1231int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
6658ffb8
PB
1232{
1233 int i;
1234
1235 for (i = 0; i < env->nb_watchpoints; i++) {
1236 if (addr == env->watchpoint[i].vaddr)
1237 return 0;
1238 }
1239 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1240 return -1;
1241
1242 i = env->nb_watchpoints++;
1243 env->watchpoint[i].vaddr = addr;
0f459d16 1244 env->watchpoint[i].type = type;
6658ffb8
PB
1245 tlb_flush_page(env, addr);
1246 /* FIXME: This flush is needed because of the hack to make memory ops
1247 terminate the TB. It can be removed once the proper IO trap and
1248 re-execute bits are in. */
1249 tb_flush(env);
1250 return i;
1251}
1252
1253/* Remove a watchpoint. */
1254int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1255{
1256 int i;
1257
1258 for (i = 0; i < env->nb_watchpoints; i++) {
1259 if (addr == env->watchpoint[i].vaddr) {
1260 env->nb_watchpoints--;
1261 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1262 tlb_flush_page(env, addr);
1263 return 0;
1264 }
1265 }
1266 return -1;
1267}
1268
7d03f82f
EI
1269/* Remove all watchpoints. */
1270void cpu_watchpoint_remove_all(CPUState *env) {
1271 int i;
1272
1273 for (i = 0; i < env->nb_watchpoints; i++) {
1274 tlb_flush_page(env, env->watchpoint[i].vaddr);
1275 }
1276 env->nb_watchpoints = 0;
1277}
1278
c33a346e
FB
1279/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1280 breakpoint is reached */
2e12669a 1281int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1282{
1fddef4b 1283#if defined(TARGET_HAS_ICE)
4c3a88a2 1284 int i;
3b46e624 1285
4c3a88a2
FB
1286 for(i = 0; i < env->nb_breakpoints; i++) {
1287 if (env->breakpoints[i] == pc)
1288 return 0;
1289 }
1290
1291 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1292 return -1;
1293 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1294
d720b93d 1295 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1296 return 0;
1297#else
1298 return -1;
1299#endif
1300}
1301
7d03f82f
EI
1302/* remove all breakpoints */
1303void cpu_breakpoint_remove_all(CPUState *env) {
1304#if defined(TARGET_HAS_ICE)
1305 int i;
1306 for(i = 0; i < env->nb_breakpoints; i++) {
1307 breakpoint_invalidate(env, env->breakpoints[i]);
1308 }
1309 env->nb_breakpoints = 0;
1310#endif
1311}
1312
4c3a88a2 1313/* remove a breakpoint */
2e12669a 1314int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1315{
1fddef4b 1316#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1317 int i;
1318 for(i = 0; i < env->nb_breakpoints; i++) {
1319 if (env->breakpoints[i] == pc)
1320 goto found;
1321 }
1322 return -1;
1323 found:
4c3a88a2 1324 env->nb_breakpoints--;
1fddef4b
FB
1325 if (i < env->nb_breakpoints)
1326 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1327
1328 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1329 return 0;
1330#else
1331 return -1;
1332#endif
1333}
1334
c33a346e
FB
1335/* enable or disable single step mode. EXCP_DEBUG is returned by the
1336 CPU loop after each instruction */
1337void cpu_single_step(CPUState *env, int enabled)
1338{
1fddef4b 1339#if defined(TARGET_HAS_ICE)
c33a346e
FB
1340 if (env->singlestep_enabled != enabled) {
1341 env->singlestep_enabled = enabled;
1342 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1343 /* XXX: only flush what is necessary */
0124311e 1344 tb_flush(env);
c33a346e
FB
1345 }
1346#endif
1347}
1348
34865134
FB
1349/* enable or disable low levels log */
1350void cpu_set_log(int log_flags)
1351{
1352 loglevel = log_flags;
1353 if (loglevel && !logfile) {
11fcfab4 1354 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1355 if (!logfile) {
1356 perror(logfilename);
1357 _exit(1);
1358 }
9fa3e853
FB
1359#if !defined(CONFIG_SOFTMMU)
1360 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1361 {
1362 static uint8_t logfile_buf[4096];
1363 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1364 }
1365#else
34865134 1366 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1367#endif
e735b91c
PB
1368 log_append = 1;
1369 }
1370 if (!loglevel && logfile) {
1371 fclose(logfile);
1372 logfile = NULL;
34865134
FB
1373 }
1374}
1375
1376void cpu_set_log_filename(const char *filename)
1377{
1378 logfilename = strdup(filename);
e735b91c
PB
1379 if (logfile) {
1380 fclose(logfile);
1381 logfile = NULL;
1382 }
1383 cpu_set_log(loglevel);
34865134 1384}
c33a346e 1385
0124311e 1386/* mask must never be zero, except for A20 change call */
68a79315 1387void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1388{
d5975363 1389#if !defined(USE_NPTL)
ea041c0e 1390 TranslationBlock *tb;
15a51156 1391 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1392#endif
2e70f6ef 1393 int old_mask;
59817ccb 1394
2e70f6ef 1395 old_mask = env->interrupt_request;
d5975363
PB
1396 /* FIXME: This is probably not threadsafe. A different thread could
1397 be in the middle of a read-modify-write operation. */
68a79315 1398 env->interrupt_request |= mask;
d5975363
PB
1399#if defined(USE_NPTL)
1400 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1401 problem and hope the cpu will stop of its own accord. For userspace
1402 emulation this often isn't actually as bad as it sounds. Often
1403 signals are used primarily to interrupt blocking syscalls. */
1404#else
2e70f6ef
PB
1405 if (use_icount) {
1406 env->icount_decr.u16.high = 0x8000;
1407#ifndef CONFIG_USER_ONLY
1408 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1409 an async event happened and we need to process it. */
1410 if (!can_do_io(env)
1411 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1412 cpu_abort(env, "Raised interrupt while not in I/O function");
1413 }
1414#endif
1415 } else {
1416 tb = env->current_tb;
1417 /* if the cpu is currently executing code, we must unlink it and
1418 all the potentially executing TBs */
1419 if (tb && !testandset(&interrupt_lock)) {
1420 env->current_tb = NULL;
1421 tb_reset_jump_recursive(tb);
1422 resetlock(&interrupt_lock);
1423 }
ea041c0e 1424 }
d5975363 1425#endif
ea041c0e
FB
1426}
1427
b54ad049
FB
1428void cpu_reset_interrupt(CPUState *env, int mask)
1429{
1430 env->interrupt_request &= ~mask;
1431}
1432
f193c797 1433CPULogItem cpu_log_items[] = {
5fafdf24 1434 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1435 "show generated host assembly code for each compiled TB" },
1436 { CPU_LOG_TB_IN_ASM, "in_asm",
1437 "show target assembly code for each compiled TB" },
5fafdf24 1438 { CPU_LOG_TB_OP, "op",
57fec1fe 1439 "show micro ops for each compiled TB" },
f193c797 1440 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1441 "show micro ops "
1442#ifdef TARGET_I386
1443 "before eflags optimization and "
f193c797 1444#endif
e01a1157 1445 "after liveness analysis" },
f193c797
FB
1446 { CPU_LOG_INT, "int",
1447 "show interrupts/exceptions in short format" },
1448 { CPU_LOG_EXEC, "exec",
1449 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1450 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1451 "show CPU state before block translation" },
f193c797
FB
1452#ifdef TARGET_I386
1453 { CPU_LOG_PCALL, "pcall",
1454 "show protected mode far calls/returns/exceptions" },
1455#endif
8e3a9fd2 1456#ifdef DEBUG_IOPORT
fd872598
FB
1457 { CPU_LOG_IOPORT, "ioport",
1458 "show all i/o ports accesses" },
8e3a9fd2 1459#endif
f193c797
FB
1460 { 0, NULL, NULL },
1461};
1462
1463static int cmp1(const char *s1, int n, const char *s2)
1464{
1465 if (strlen(s2) != n)
1466 return 0;
1467 return memcmp(s1, s2, n) == 0;
1468}
3b46e624 1469
f193c797
FB
1470/* takes a comma separated list of log masks. Return 0 if error. */
1471int cpu_str_to_log_mask(const char *str)
1472{
1473 CPULogItem *item;
1474 int mask;
1475 const char *p, *p1;
1476
1477 p = str;
1478 mask = 0;
1479 for(;;) {
1480 p1 = strchr(p, ',');
1481 if (!p1)
1482 p1 = p + strlen(p);
8e3a9fd2
FB
1483 if(cmp1(p,p1-p,"all")) {
1484 for(item = cpu_log_items; item->mask != 0; item++) {
1485 mask |= item->mask;
1486 }
1487 } else {
f193c797
FB
1488 for(item = cpu_log_items; item->mask != 0; item++) {
1489 if (cmp1(p, p1 - p, item->name))
1490 goto found;
1491 }
1492 return 0;
8e3a9fd2 1493 }
f193c797
FB
1494 found:
1495 mask |= item->mask;
1496 if (*p1 != ',')
1497 break;
1498 p = p1 + 1;
1499 }
1500 return mask;
1501}
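/* For example, cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every cpu_log_items[]
   entry, and an unknown name makes the whole call return 0. */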
ea041c0e 1502
7501267e
FB
1503void cpu_abort(CPUState *env, const char *fmt, ...)
1504{
1505 va_list ap;
493ae1f0 1506 va_list ap2;
7501267e
FB
1507
1508 va_start(ap, fmt);
493ae1f0 1509 va_copy(ap2, ap);
7501267e
FB
1510 fprintf(stderr, "qemu: fatal: ");
1511 vfprintf(stderr, fmt, ap);
1512 fprintf(stderr, "\n");
1513#ifdef TARGET_I386
7fe48483
FB
1514 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1515#else
1516 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1517#endif
924edcae 1518 if (logfile) {
f9373291 1519 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1520 vfprintf(logfile, fmt, ap2);
f9373291
JM
1521 fprintf(logfile, "\n");
1522#ifdef TARGET_I386
1523 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1524#else
1525 cpu_dump_state(env, logfile, fprintf, 0);
1526#endif
924edcae
AZ
1527 fflush(logfile);
1528 fclose(logfile);
1529 }
493ae1f0 1530 va_end(ap2);
f9373291 1531 va_end(ap);
7501267e
FB
1532 abort();
1533}
1534
c5be9f08
TS
1535CPUState *cpu_copy(CPUState *env)
1536{
01ba9816 1537 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1538 /* preserve chaining and index */
1539 CPUState *next_cpu = new_env->next_cpu;
1540 int cpu_index = new_env->cpu_index;
1541 memcpy(new_env, env, sizeof(CPUState));
1542 new_env->next_cpu = next_cpu;
1543 new_env->cpu_index = cpu_index;
1544 return new_env;
1545}
1546
0124311e
FB
1547#if !defined(CONFIG_USER_ONLY)
1548
5c751e99
EI
1549static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1550{
1551 unsigned int i;
1552
1553 /* Discard jump cache entries for any tb which might potentially
1554 overlap the flushed page. */
1555 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1556 memset (&env->tb_jmp_cache[i], 0,
1557 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1558
1559 i = tb_jmp_cache_hash_page(addr);
1560 memset (&env->tb_jmp_cache[i], 0,
1561 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1562}
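/* Two hash ranges are cleared because a TB can span two guest pages (see
   tb_gen_code): a block starting on the page just below 'addr' may extend
   into the flushed page, so its cached entries must be dropped as well. */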
1563
ee8b7021
FB
1564/* NOTE: if flush_global is true, also flush global entries (not
1565 implemented yet) */
1566void tlb_flush(CPUState *env, int flush_global)
33417e70 1567{
33417e70 1568 int i;
0124311e 1569
9fa3e853
FB
1570#if defined(DEBUG_TLB)
1571 printf("tlb_flush:\n");
1572#endif
0124311e
FB
1573 /* must reset current TB so that interrupts cannot modify the
1574 links while we are modifying them */
1575 env->current_tb = NULL;
1576
33417e70 1577 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1578 env->tlb_table[0][i].addr_read = -1;
1579 env->tlb_table[0][i].addr_write = -1;
1580 env->tlb_table[0][i].addr_code = -1;
1581 env->tlb_table[1][i].addr_read = -1;
1582 env->tlb_table[1][i].addr_write = -1;
1583 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1584#if (NB_MMU_MODES >= 3)
1585 env->tlb_table[2][i].addr_read = -1;
1586 env->tlb_table[2][i].addr_write = -1;
1587 env->tlb_table[2][i].addr_code = -1;
1588#if (NB_MMU_MODES == 4)
1589 env->tlb_table[3][i].addr_read = -1;
1590 env->tlb_table[3][i].addr_write = -1;
1591 env->tlb_table[3][i].addr_code = -1;
1592#endif
1593#endif
33417e70 1594 }
9fa3e853 1595
8a40a180 1596 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1597
0a962c02
FB
1598#ifdef USE_KQEMU
1599 if (env->kqemu_enabled) {
1600 kqemu_flush(env, flush_global);
1601 }
9fa3e853 1602#endif
e3db7226 1603 tlb_flush_count++;
33417e70
FB
1604}
1605
274da6b2 1606static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1607{
5fafdf24 1608 if (addr == (tlb_entry->addr_read &
84b7b8e7 1609 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1610 addr == (tlb_entry->addr_write &
84b7b8e7 1611 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1612 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1613 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1614 tlb_entry->addr_read = -1;
1615 tlb_entry->addr_write = -1;
1616 tlb_entry->addr_code = -1;
1617 }
61382a50
FB
1618}
1619
2e12669a 1620void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1621{
8a40a180 1622 int i;
0124311e 1623
9fa3e853 1624#if defined(DEBUG_TLB)
108c49b8 1625 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1626#endif
0124311e
FB
1627 /* must reset current TB so that interrupts cannot modify the
1628 links while we are modifying them */
1629 env->current_tb = NULL;
61382a50
FB
1630
1631 addr &= TARGET_PAGE_MASK;
1632 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1633 tlb_flush_entry(&env->tlb_table[0][i], addr);
1634 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1635#if (NB_MMU_MODES >= 3)
1636 tlb_flush_entry(&env->tlb_table[2][i], addr);
1637#if (NB_MMU_MODES == 4)
1638 tlb_flush_entry(&env->tlb_table[3][i], addr);
1639#endif
1640#endif
0124311e 1641
5c751e99 1642 tlb_flush_jmp_cache(env, addr);
9fa3e853 1643
0a962c02
FB
1644#ifdef USE_KQEMU
1645 if (env->kqemu_enabled) {
1646 kqemu_flush_page(env, addr);
1647 }
1648#endif
9fa3e853
FB
1649}
1650
9fa3e853
FB
1651/* update the TLBs so that writes to code in the virtual page 'addr'
1652 can be detected */
6a00d601 1653static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1654{
5fafdf24 1655 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1656 ram_addr + TARGET_PAGE_SIZE,
1657 CODE_DIRTY_FLAG);
9fa3e853
FB
1658}
1659
9fa3e853 1660/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1661 tested for self modifying code */
5fafdf24 1662static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1663 target_ulong vaddr)
9fa3e853 1664{
3a7d929e 1665 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1666}
1667
5fafdf24 1668static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1669 unsigned long start, unsigned long length)
1670{
1671 unsigned long addr;
84b7b8e7
FB
1672 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1673 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1674 if ((addr - start) < length) {
0f459d16 1675 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1676 }
1677 }
1678}
1679
3a7d929e 1680void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1681 int dirty_flags)
1ccde1cb
FB
1682{
1683 CPUState *env;
4f2ac237 1684 unsigned long length, start1;
0a962c02
FB
1685 int i, mask, len;
1686 uint8_t *p;
1ccde1cb
FB
1687
1688 start &= TARGET_PAGE_MASK;
1689 end = TARGET_PAGE_ALIGN(end);
1690
1691 length = end - start;
1692 if (length == 0)
1693 return;
0a962c02 1694 len = length >> TARGET_PAGE_BITS;
3a7d929e 1695#ifdef USE_KQEMU
6a00d601
FB
1696 /* XXX: should not depend on cpu context */
1697 env = first_cpu;
3a7d929e 1698 if (env->kqemu_enabled) {
f23db169
FB
1699 ram_addr_t addr;
1700 addr = start;
1701 for(i = 0; i < len; i++) {
1702 kqemu_set_notdirty(env, addr);
1703 addr += TARGET_PAGE_SIZE;
1704 }
3a7d929e
FB
1705 }
1706#endif
f23db169
FB
1707 mask = ~dirty_flags;
1708 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1709 for(i = 0; i < len; i++)
1710 p[i] &= mask;
1711
1ccde1cb
FB
1712 /* we modify the TLB cache so that the dirty bit will be set again
1713 when accessing the range */
59817ccb 1714 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1715 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1716 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1717 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1718 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1719 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1720#if (NB_MMU_MODES >= 3)
1721 for(i = 0; i < CPU_TLB_SIZE; i++)
1722 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1723#if (NB_MMU_MODES == 4)
1724 for(i = 0; i < CPU_TLB_SIZE; i++)
1725 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1726#endif
1727#endif
6a00d601 1728 }
1ccde1cb
FB
1729}
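/* Clearing the dirty flags also pushes the matching TLB write entries back
   to the TLB_NOTDIRTY slow path (tlb_reset_dirty_range above), so the next
   guest store to such a page is trapped and can mark the page dirty again. */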
1730
3a7d929e
FB
1731static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1732{
1733 ram_addr_t ram_addr;
1734
84b7b8e7 1735 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1736 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1737 tlb_entry->addend - (unsigned long)phys_ram_base;
1738 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1739 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1740 }
1741 }
1742}
1743
1744/* update the TLB according to the current state of the dirty bits */
1745void cpu_tlb_update_dirty(CPUState *env)
1746{
1747 int i;
1748 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1749 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1750 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1751 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1752#if (NB_MMU_MODES >= 3)
1753 for(i = 0; i < CPU_TLB_SIZE; i++)
1754 tlb_update_dirty(&env->tlb_table[2][i]);
1755#if (NB_MMU_MODES == 4)
1756 for(i = 0; i < CPU_TLB_SIZE; i++)
1757 tlb_update_dirty(&env->tlb_table[3][i]);
1758#endif
1759#endif
3a7d929e
FB
1760}
1761
0f459d16 1762static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1763{
0f459d16
PB
1764 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1765 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1766}
1767
0f459d16
PB
1768/* update the TLB corresponding to virtual page vaddr
1769 so that it is no longer dirty */
1770static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1771{
1ccde1cb
FB
1772 int i;
1773
0f459d16 1774 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1775 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1776 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1777 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1778#if (NB_MMU_MODES >= 3)
0f459d16 1779 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1780#if (NB_MMU_MODES == 4)
0f459d16 1781 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1782#endif
1783#endif
9fa3e853
FB
1784}
1785
59817ccb
FB
1786/* add a new TLB entry. At most one entry for a given virtual address
1787 is permitted. Return 0 if OK or 2 if the page could not be mapped
1788 (can only happen in non-SOFTMMU mode for I/O pages or pages
1789 conflicting with the host address space). */
5fafdf24
TS
1790int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1791 target_phys_addr_t paddr, int prot,
6ebbf390 1792 int mmu_idx, int is_softmmu)
9fa3e853 1793{
92e873b9 1794 PhysPageDesc *p;
4f2ac237 1795 unsigned long pd;
9fa3e853 1796 unsigned int index;
4f2ac237 1797 target_ulong address;
0f459d16 1798 target_ulong code_address;
108c49b8 1799 target_phys_addr_t addend;
9fa3e853 1800 int ret;
84b7b8e7 1801 CPUTLBEntry *te;
6658ffb8 1802 int i;
0f459d16 1803 target_phys_addr_t iotlb;
9fa3e853 1804
92e873b9 1805 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1806 if (!p) {
1807 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1808 } else {
1809 pd = p->phys_offset;
9fa3e853
FB
1810 }
1811#if defined(DEBUG_TLB)
6ebbf390
JM
1812 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1813 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1814#endif
1815
1816 ret = 0;
0f459d16
PB
1817 address = vaddr;
1818 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1819 /* IO memory case (romd handled later) */
1820 address |= TLB_MMIO;
1821 }
1822 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1823 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1824 /* Normal RAM. */
1825 iotlb = pd & TARGET_PAGE_MASK;
1826 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1827 iotlb |= IO_MEM_NOTDIRTY;
1828 else
1829 iotlb |= IO_MEM_ROM;
1830 } else {
1831 /* IO handlers are currently passed a physical address.
1832 It would be nice to pass an offset from the base address
1833 of that region. This would avoid having to special case RAM,
1834 and avoid full address decoding in every device.
1835 We can't use the high bits of pd for this because
1836 IO_MEM_ROMD uses these as a ram address. */
1837 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1838 }
1839
1840 code_address = address;
1841 /* Make accesses to pages with watchpoints go via the
1842 watchpoint trap routines. */
1843 for (i = 0; i < env->nb_watchpoints; i++) {
1844 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1845 iotlb = io_mem_watch + paddr;
1846 /* TODO: The memory case can be optimized by not trapping
1847 reads of pages with a write breakpoint. */
1848 address |= TLB_MMIO;
6658ffb8 1849 }
0f459d16 1850 }
d79acba4 1851
0f459d16
PB
1852 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1853 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1854 te = &env->tlb_table[mmu_idx][index];
1855 te->addend = addend - vaddr;
1856 if (prot & PAGE_READ) {
1857 te->addr_read = address;
1858 } else {
1859 te->addr_read = -1;
1860 }
5c751e99 1861
0f459d16
PB
1862 if (prot & PAGE_EXEC) {
1863 te->addr_code = code_address;
1864 } else {
1865 te->addr_code = -1;
1866 }
1867 if (prot & PAGE_WRITE) {
1868 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1869 (pd & IO_MEM_ROMD)) {
1870 /* Write access calls the I/O callback. */
1871 te->addr_write = address | TLB_MMIO;
1872 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1873 !cpu_physical_memory_is_dirty(pd)) {
1874 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1875 } else {
0f459d16 1876 te->addr_write = address;
9fa3e853 1877 }
0f459d16
PB
1878 } else {
1879 te->addr_write = -1;
9fa3e853 1880 }
9fa3e853
FB
1881 return ret;
1882}
1883
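/* A minimal usage sketch (not part of this file): a target's TLB-fill path is
   assumed to translate vaddr to paddr/prot and then install the mapping for
   the faulting MMU index with tlb_set_page_exec(). The identity translation
   below is only a placeholder for target-specific MMU code. */
static int example_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    /* placeholder translation: map the page onto itself */
    paddr = vaddr & TARGET_PAGE_MASK;
    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                             mmu_idx, 1);
}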
0124311e
FB
1884#else
1885
ee8b7021 1886void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1887{
1888}
1889
2e12669a 1890void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1891{
1892}
1893
5fafdf24
TS
1894int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1895 target_phys_addr_t paddr, int prot,
6ebbf390 1896 int mmu_idx, int is_softmmu)
9fa3e853
FB
1897{
1898 return 0;
1899}
0124311e 1900
9fa3e853
FB
1901/* dump memory mappings */
1902void page_dump(FILE *f)
33417e70 1903{
9fa3e853
FB
1904 unsigned long start, end;
1905 int i, j, prot, prot1;
1906 PageDesc *p;
33417e70 1907
9fa3e853
FB
1908 fprintf(f, "%-8s %-8s %-8s %s\n",
1909 "start", "end", "size", "prot");
1910 start = -1;
1911 end = -1;
1912 prot = 0;
1913 for(i = 0; i <= L1_SIZE; i++) {
1914 if (i < L1_SIZE)
1915 p = l1_map[i];
1916 else
1917 p = NULL;
1918 for(j = 0;j < L2_SIZE; j++) {
1919 if (!p)
1920 prot1 = 0;
1921 else
1922 prot1 = p[j].flags;
1923 if (prot1 != prot) {
1924 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1925 if (start != -1) {
1926 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1927 start, end, end - start,
9fa3e853
FB
1928 prot & PAGE_READ ? 'r' : '-',
1929 prot & PAGE_WRITE ? 'w' : '-',
1930 prot & PAGE_EXEC ? 'x' : '-');
1931 }
1932 if (prot1 != 0)
1933 start = end;
1934 else
1935 start = -1;
1936 prot = prot1;
1937 }
1938 if (!p)
1939 break;
1940 }
33417e70 1941 }
33417e70
FB
1942}
1943
53a5960a 1944int page_get_flags(target_ulong address)
33417e70 1945{
9fa3e853
FB
1946 PageDesc *p;
1947
1948 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1949 if (!p)
9fa3e853
FB
1950 return 0;
1951 return p->flags;
1952}
1953
1954/* modify the flags of a page and invalidate the code if
1955 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1956 depending on PAGE_WRITE */
53a5960a 1957void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1958{
1959 PageDesc *p;
53a5960a 1960 target_ulong addr;
9fa3e853 1961
c8a706fe 1962 /* mmap_lock should already be held. */
9fa3e853
FB
1963 start = start & TARGET_PAGE_MASK;
1964 end = TARGET_PAGE_ALIGN(end);
1965 if (flags & PAGE_WRITE)
1966 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
1967 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1968 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
1969 /* We may be called for host regions that are outside guest
1970 address space. */
1971 if (!p)
1972 return;
9fa3e853
FB
1973 /* if the write protection is set, then we invalidate the code
1974 inside */
5fafdf24 1975 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1976 (flags & PAGE_WRITE) &&
1977 p->first_tb) {
d720b93d 1978 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1979 }
1980 p->flags = flags;
1981 }
33417e70
FB
1982}
1983
3d97b40b
TS
1984int page_check_range(target_ulong start, target_ulong len, int flags)
1985{
1986 PageDesc *p;
1987 target_ulong end;
1988 target_ulong addr;
1989
1990 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1991 start = start & TARGET_PAGE_MASK;
1992
1993 if( end < start )
1994 /* we've wrapped around */
1995 return -1;
1996 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1997 p = page_find(addr >> TARGET_PAGE_BITS);
1998 if( !p )
1999 return -1;
2000 if( !(p->flags & PAGE_VALID) )
2001 return -1;
2002
dae3270c 2003 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2004 return -1;
dae3270c
FB
2005 if (flags & PAGE_WRITE) {
2006 if (!(p->flags & PAGE_WRITE_ORG))
2007 return -1;
2008 /* unprotect the page if it was put read-only because it
2009 contains translated code */
2010 if (!(p->flags & PAGE_WRITE)) {
2011 if (!page_unprotect(addr, 0, NULL))
2012 return -1;
2013 }
2014 return 0;
2015 }
3d97b40b
TS
2016 }
2017 return 0;
2018}
2019
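/* A minimal usage sketch (not part of this file): user-mode emulation code
   that is about to write guest memory can validate the whole range first with
   page_check_range(); a negative result means the range is unmapped, invalid,
   or not writable. The function name is invented for the example. */
static int example_prepare_guest_write(target_ulong addr, target_ulong len)
{
    if (page_check_range(addr, len, PAGE_WRITE) < 0)
        return -1;          /* unmapped or read-only: refuse the access */
    return 0;               /* range is writable (unprotected if needed) */
}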
9fa3e853
FB
2020/* called from signal handler: invalidate the code and unprotect the
2021 page. Return TRUE if the fault was successfully handled. */
53a5960a 2022int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2023{
2024 unsigned int page_index, prot, pindex;
2025 PageDesc *p, *p1;
53a5960a 2026 target_ulong host_start, host_end, addr;
9fa3e853 2027
c8a706fe
PB
2028 /* Technically this isn't safe inside a signal handler. However we
2029 know this only ever happens in a synchronous SEGV handler, so in
2030 practice it seems to be ok. */
2031 mmap_lock();
2032
83fb7adf 2033 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2034 page_index = host_start >> TARGET_PAGE_BITS;
2035 p1 = page_find(page_index);
c8a706fe
PB
2036 if (!p1) {
2037 mmap_unlock();
9fa3e853 2038 return 0;
c8a706fe 2039 }
83fb7adf 2040 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2041 p = p1;
2042 prot = 0;
2043 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2044 prot |= p->flags;
2045 p++;
2046 }
2047 /* if the page was really writable, then we change its
2048 protection back to writable */
2049 if (prot & PAGE_WRITE_ORG) {
2050 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2051 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2052 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2053 (prot & PAGE_BITS) | PAGE_WRITE);
2054 p1[pindex].flags |= PAGE_WRITE;
2055 /* and since the content will be modified, we must invalidate
2056 the corresponding translated code. */
d720b93d 2057 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2058#ifdef DEBUG_TB_CHECK
2059 tb_invalidate_check(address);
2060#endif
c8a706fe 2061 mmap_unlock();
9fa3e853
FB
2062 return 1;
2063 }
2064 }
c8a706fe 2065 mmap_unlock();
9fa3e853
FB
2066 return 0;
2067}
2068
6a00d601
FB
2069static inline void tlb_set_dirty(CPUState *env,
2070 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2071{
2072}
9fa3e853
FB
2073#endif /* defined(CONFIG_USER_ONLY) */
2074
e2eef170 2075#if !defined(CONFIG_USER_ONLY)
db7b5426 2076static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2077 ram_addr_t memory);
2078static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2079 ram_addr_t orig_memory);
db7b5426
BS
2080#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2081 need_subpage) \
2082 do { \
2083 if (addr > start_addr) \
2084 start_addr2 = 0; \
2085 else { \
2086 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2087 if (start_addr2 > 0) \
2088 need_subpage = 1; \
2089 } \
2090 \
49e9fba2 2091 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2092 end_addr2 = TARGET_PAGE_SIZE - 1; \
2093 else { \
2094 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2095 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2096 need_subpage = 1; \
2097 } \
2098 } while (0)
2099
33417e70
FB
2100/* register physical memory. 'size' must be a multiple of the target
2101 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2102 I/O memory page */
5fafdf24 2103void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2104 ram_addr_t size,
2105 ram_addr_t phys_offset)
33417e70 2106{
108c49b8 2107 target_phys_addr_t addr, end_addr;
92e873b9 2108 PhysPageDesc *p;
9d42037b 2109 CPUState *env;
00f82b8a 2110 ram_addr_t orig_size = size;
db7b5426 2111 void *subpage;
33417e70 2112
da260249
FB
2113#ifdef USE_KQEMU
2114 /* XXX: should not depend on cpu context */
2115 env = first_cpu;
2116 if (env->kqemu_enabled) {
2117 kqemu_set_phys_mem(start_addr, size, phys_offset);
2118 }
2119#endif
5fd386f6 2120 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2121 end_addr = start_addr + (target_phys_addr_t)size;
2122 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2123 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2124 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2125 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2126 target_phys_addr_t start_addr2, end_addr2;
2127 int need_subpage = 0;
2128
2129 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2130 need_subpage);
4254fab8 2131 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2132 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2133 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2134 &p->phys_offset, orig_memory);
2135 } else {
2136 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2137 >> IO_MEM_SHIFT];
2138 }
2139 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2140 } else {
2141 p->phys_offset = phys_offset;
2142 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2143 (phys_offset & IO_MEM_ROMD))
2144 phys_offset += TARGET_PAGE_SIZE;
2145 }
2146 } else {
2147 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2148 p->phys_offset = phys_offset;
2149 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2150 (phys_offset & IO_MEM_ROMD))
2151 phys_offset += TARGET_PAGE_SIZE;
2152 else {
2153 target_phys_addr_t start_addr2, end_addr2;
2154 int need_subpage = 0;
2155
2156 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2157 end_addr2, need_subpage);
2158
4254fab8 2159 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2160 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2161 &p->phys_offset, IO_MEM_UNASSIGNED);
2162 subpage_register(subpage, start_addr2, end_addr2,
2163 phys_offset);
2164 }
2165 }
2166 }
33417e70 2167 }
3b46e624 2168
9d42037b
FB
2169 /* since each CPU stores ram addresses in its TLB cache, we must
2170 reset the modified entries */
2171 /* XXX: slow ! */
2172 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2173 tlb_flush(env, 1);
2174 }
33417e70
FB
2175}
2176
ba863458 2177/* XXX: temporary until new memory mapping API */
00f82b8a 2178ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2179{
2180 PhysPageDesc *p;
2181
2182 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2183 if (!p)
2184 return IO_MEM_UNASSIGNED;
2185 return p->phys_offset;
2186}
2187
e9a1ab19 2188/* XXX: better than nothing */
00f82b8a 2189ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2190{
2191 ram_addr_t addr;
7fb4fdcf 2192 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2193 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2194 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2195 abort();
2196 }
2197 addr = phys_ram_alloc_offset;
2198 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2199 return addr;
2200}
2201
2202void qemu_ram_free(ram_addr_t addr)
2203{
2204}
2205
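/* A minimal usage sketch (not part of this file): board code is assumed to
   carve guest RAM out of the preallocated phys_ram area with qemu_ram_alloc()
   and then map it at guest physical address 0 with
   cpu_register_physical_memory(); IO_MEM_RAM marks the region as plain RAM. */
static void example_map_main_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
}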
a4193c8a 2206static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2207{
67d3b957 2208#ifdef DEBUG_UNASSIGNED
ab3d1727 2209 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2210#endif
2211#ifdef TARGET_SPARC
6c36d3fa 2212 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2213#elif TARGET_CRIS
2214 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2215#endif
33417e70
FB
2216 return 0;
2217}
2218
a4193c8a 2219static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2220{
67d3b957 2221#ifdef DEBUG_UNASSIGNED
ab3d1727 2222 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2223#endif
b4f0a316 2224#ifdef TARGET_SPARC
6c36d3fa 2225 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2226#elif TARGET_CRIS
2227 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2228#endif
33417e70
FB
2229}
2230
2231static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2232 unassigned_mem_readb,
2233 unassigned_mem_readb,
2234 unassigned_mem_readb,
2235};
2236
2237static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2238 unassigned_mem_writeb,
2239 unassigned_mem_writeb,
2240 unassigned_mem_writeb,
2241};
2242
0f459d16
PB
2243static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2244 uint32_t val)
9fa3e853 2245{
3a7d929e 2246 int dirty_flags;
3a7d929e
FB
2247 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2248 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2249#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2250 tb_invalidate_phys_page_fast(ram_addr, 1);
2251 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2252#endif
3a7d929e 2253 }
0f459d16 2254 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2255#ifdef USE_KQEMU
2256 if (cpu_single_env->kqemu_enabled &&
2257 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2258 kqemu_modify_page(cpu_single_env, ram_addr);
2259#endif
f23db169
FB
2260 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2261 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2262 /* we remove the notdirty callback only if the code has been
2263 flushed */
2264 if (dirty_flags == 0xff)
2e70f6ef 2265 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2266}
2267
0f459d16
PB
2268static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2269 uint32_t val)
9fa3e853 2270{
3a7d929e 2271 int dirty_flags;
3a7d929e
FB
2272 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2273 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2274#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2275 tb_invalidate_phys_page_fast(ram_addr, 2);
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2277#endif
3a7d929e 2278 }
0f459d16 2279 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2280#ifdef USE_KQEMU
2281 if (cpu_single_env->kqemu_enabled &&
2282 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2283 kqemu_modify_page(cpu_single_env, ram_addr);
2284#endif
f23db169
FB
2285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2286 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2287 /* we remove the notdirty callback only if the code has been
2288 flushed */
2289 if (dirty_flags == 0xff)
2e70f6ef 2290 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2291}
2292
0f459d16
PB
2293static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2294 uint32_t val)
9fa3e853 2295{
3a7d929e 2296 int dirty_flags;
3a7d929e
FB
2297 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2298 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2299#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2300 tb_invalidate_phys_page_fast(ram_addr, 4);
2301 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2302#endif
3a7d929e 2303 }
0f459d16 2304 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2305#ifdef USE_KQEMU
2306 if (cpu_single_env->kqemu_enabled &&
2307 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2308 kqemu_modify_page(cpu_single_env, ram_addr);
2309#endif
f23db169
FB
2310 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2311 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2312 /* we remove the notdirty callback only if the code has been
2313 flushed */
2314 if (dirty_flags == 0xff)
2e70f6ef 2315 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2316}
2317
3a7d929e 2318static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2319 NULL, /* never used */
2320 NULL, /* never used */
2321 NULL, /* never used */
2322};
2323
1ccde1cb
FB
2324static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2325 notdirty_mem_writeb,
2326 notdirty_mem_writew,
2327 notdirty_mem_writel,
2328};
2329
0f459d16
PB
2330/* Generate a debug exception if a watchpoint has been hit. */
2331static void check_watchpoint(int offset, int flags)
2332{
2333 CPUState *env = cpu_single_env;
2334 target_ulong vaddr;
2335 int i;
2336
2e70f6ef 2337 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
0f459d16
PB
2338 for (i = 0; i < env->nb_watchpoints; i++) {
2339 if (vaddr == env->watchpoint[i].vaddr
2340 && (env->watchpoint[i].type & flags)) {
2341 env->watchpoint_hit = i + 1;
2342 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2343 break;
2344 }
2345 }
2346}
2347
6658ffb8
PB
2348/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2349 so these check for a hit then pass through to the normal out-of-line
2350 phys routines. */
2351static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2352{
0f459d16 2353 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2354 return ldub_phys(addr);
2355}
2356
2357static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2358{
0f459d16 2359 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2360 return lduw_phys(addr);
2361}
2362
2363static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2364{
0f459d16 2365 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2366 return ldl_phys(addr);
2367}
2368
6658ffb8
PB
2369static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2370 uint32_t val)
2371{
0f459d16 2372 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2373 stb_phys(addr, val);
2374}
2375
2376static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2377 uint32_t val)
2378{
0f459d16 2379 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2380 stw_phys(addr, val);
2381}
2382
2383static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2384 uint32_t val)
2385{
0f459d16 2386 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2387 stl_phys(addr, val);
2388}
2389
2390static CPUReadMemoryFunc *watch_mem_read[3] = {
2391 watch_mem_readb,
2392 watch_mem_readw,
2393 watch_mem_readl,
2394};
2395
2396static CPUWriteMemoryFunc *watch_mem_write[3] = {
2397 watch_mem_writeb,
2398 watch_mem_writew,
2399 watch_mem_writel,
2400};
6658ffb8 2401
db7b5426
BS
2402static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2403 unsigned int len)
2404{
db7b5426
BS
2405 uint32_t ret;
2406 unsigned int idx;
2407
2408 idx = SUBPAGE_IDX(addr - mmio->base);
2409#if defined(DEBUG_SUBPAGE)
2410 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2411 mmio, len, addr, idx);
2412#endif
3ee89922 2413 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2414
2415 return ret;
2416}
2417
2418static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2419 uint32_t value, unsigned int len)
2420{
db7b5426
BS
2421 unsigned int idx;
2422
2423 idx = SUBPAGE_IDX(addr - mmio->base);
2424#if defined(DEBUG_SUBPAGE)
2425 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2426 mmio, len, addr, idx, value);
2427#endif
3ee89922 2428 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2429}
2430
2431static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2432{
2433#if defined(DEBUG_SUBPAGE)
2434 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2435#endif
2436
2437 return subpage_readlen(opaque, addr, 0);
2438}
2439
2440static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2441 uint32_t value)
2442{
2443#if defined(DEBUG_SUBPAGE)
2444 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2445#endif
2446 subpage_writelen(opaque, addr, value, 0);
2447}
2448
2449static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2450{
2451#if defined(DEBUG_SUBPAGE)
2452 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2453#endif
2454
2455 return subpage_readlen(opaque, addr, 1);
2456}
2457
2458static void subpage_writew (void *opaque, target_phys_addr_t addr,
2459 uint32_t value)
2460{
2461#if defined(DEBUG_SUBPAGE)
2462 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2463#endif
2464 subpage_writelen(opaque, addr, value, 1);
2465}
2466
2467static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2468{
2469#if defined(DEBUG_SUBPAGE)
2470 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2471#endif
2472
2473 return subpage_readlen(opaque, addr, 2);
2474}
2475
2476static void subpage_writel (void *opaque,
2477 target_phys_addr_t addr, uint32_t value)
2478{
2479#if defined(DEBUG_SUBPAGE)
2480 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2481#endif
2482 subpage_writelen(opaque, addr, value, 2);
2483}
2484
2485static CPUReadMemoryFunc *subpage_read[] = {
2486 &subpage_readb,
2487 &subpage_readw,
2488 &subpage_readl,
2489};
2490
2491static CPUWriteMemoryFunc *subpage_write[] = {
2492 &subpage_writeb,
2493 &subpage_writew,
2494 &subpage_writel,
2495};
2496
2497static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2498 ram_addr_t memory)
db7b5426
BS
2499{
2500 int idx, eidx;
4254fab8 2501 unsigned int i;
db7b5426
BS
2502
2503 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2504 return -1;
2505 idx = SUBPAGE_IDX(start);
2506 eidx = SUBPAGE_IDX(end);
2507#if defined(DEBUG_SUBPAGE)
2508 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2509 mmio, start, end, idx, eidx, memory);
2510#endif
2511 memory >>= IO_MEM_SHIFT;
2512 for (; idx <= eidx; idx++) {
4254fab8 2513 for (i = 0; i < 4; i++) {
3ee89922
BS
2514 if (io_mem_read[memory][i]) {
2515 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2516 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2517 }
2518 if (io_mem_write[memory][i]) {
2519 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2520 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2521 }
4254fab8 2522 }
db7b5426
BS
2523 }
2524
2525 return 0;
2526}
2527
00f82b8a
AJ
2528static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2529 ram_addr_t orig_memory)
db7b5426
BS
2530{
2531 subpage_t *mmio;
2532 int subpage_memory;
2533
2534 mmio = qemu_mallocz(sizeof(subpage_t));
2535 if (mmio != NULL) {
2536 mmio->base = base;
2537 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2538#if defined(DEBUG_SUBPAGE)
2539 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2540 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2541#endif
2542 *phys = subpage_memory | IO_MEM_SUBPAGE;
2543 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2544 }
2545
2546 return mmio;
2547}
2548
33417e70
FB
2549static void io_mem_init(void)
2550{
3a7d929e 2551 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2552 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2553 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2554 io_mem_nb = 5;
2555
0f459d16 2556 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2557 watch_mem_write, NULL);
1ccde1cb 2558 /* alloc dirty bits array */
0a962c02 2559 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2560 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2561}
2562
2563/* mem_read and mem_write are arrays of functions containing the
2564 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2565 2). Functions can be omitted with a NULL function pointer. The
2566 registered functions may be modified dynamically later.
2567 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2568 modified. If it is zero, a new io zone is allocated. The return
2569 value can be used with cpu_register_physical_memory(). (-1) is
2570 returned on error. */
33417e70
FB
2571int cpu_register_io_memory(int io_index,
2572 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2573 CPUWriteMemoryFunc **mem_write,
2574 void *opaque)
33417e70 2575{
4254fab8 2576 int i, subwidth = 0;
33417e70
FB
2577
2578 if (io_index <= 0) {
b5ff1b31 2579 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2580 return -1;
2581 io_index = io_mem_nb++;
2582 } else {
2583 if (io_index >= IO_MEM_NB_ENTRIES)
2584 return -1;
2585 }
b5ff1b31 2586
33417e70 2587 for(i = 0;i < 3; i++) {
4254fab8
BS
2588 if (!mem_read[i] || !mem_write[i])
2589 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2590 io_mem_read[io_index][i] = mem_read[i];
2591 io_mem_write[io_index][i] = mem_write[i];
2592 }
a4193c8a 2593 io_mem_opaque[io_index] = opaque;
4254fab8 2594 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2595}
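/* A minimal usage sketch (not part of this file): a device supplies per-width
   read/write handlers (NULL entries are allowed, as described above), registers
   them with cpu_register_io_memory(), and maps the returned value over one page
   of the guest physical address space. All mydev_* names are invented for the
   example. */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* device register read would go here */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write would go here */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL, NULL, mydev_readl,            /* 32 bit accesses only */
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(target_phys_addr_t base, void *opaque)
{
    int io_index;

    io_index = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}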
61382a50 2596
8926b517
FB
2597CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2598{
2599 return io_mem_write[io_index >> IO_MEM_SHIFT];
2600}
2601
2602CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2603{
2604 return io_mem_read[io_index >> IO_MEM_SHIFT];
2605}
2606
e2eef170
PB
2607#endif /* !defined(CONFIG_USER_ONLY) */
2608
13eb76e0
FB
2609/* physical memory access (slow version, mainly for debug) */
2610#if defined(CONFIG_USER_ONLY)
5fafdf24 2611void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2612 int len, int is_write)
2613{
2614 int l, flags;
2615 target_ulong page;
53a5960a 2616 void * p;
13eb76e0
FB
2617
2618 while (len > 0) {
2619 page = addr & TARGET_PAGE_MASK;
2620 l = (page + TARGET_PAGE_SIZE) - addr;
2621 if (l > len)
2622 l = len;
2623 flags = page_get_flags(page);
2624 if (!(flags & PAGE_VALID))
2625 return;
2626 if (is_write) {
2627 if (!(flags & PAGE_WRITE))
2628 return;
579a97f7 2629 /* XXX: this code should not depend on lock_user */
72fb7daa 2630 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2631 /* FIXME - should this return an error rather than just fail? */
2632 return;
72fb7daa
AJ
2633 memcpy(p, buf, l);
2634 unlock_user(p, addr, l);
13eb76e0
FB
2635 } else {
2636 if (!(flags & PAGE_READ))
2637 return;
579a97f7 2638 /* XXX: this code should not depend on lock_user */
72fb7daa 2639 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2640 /* FIXME - should this return an error rather than just fail? */
2641 return;
72fb7daa 2642 memcpy(buf, p, l);
5b257578 2643 unlock_user(p, addr, 0);
13eb76e0
FB
2644 }
2645 len -= l;
2646 buf += l;
2647 addr += l;
2648 }
2649}
8df1cd07 2650
13eb76e0 2651#else
5fafdf24 2652void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2653 int len, int is_write)
2654{
2655 int l, io_index;
2656 uint8_t *ptr;
2657 uint32_t val;
2e12669a
FB
2658 target_phys_addr_t page;
2659 unsigned long pd;
92e873b9 2660 PhysPageDesc *p;
3b46e624 2661
13eb76e0
FB
2662 while (len > 0) {
2663 page = addr & TARGET_PAGE_MASK;
2664 l = (page + TARGET_PAGE_SIZE) - addr;
2665 if (l > len)
2666 l = len;
92e873b9 2667 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2668 if (!p) {
2669 pd = IO_MEM_UNASSIGNED;
2670 } else {
2671 pd = p->phys_offset;
2672 }
3b46e624 2673
13eb76e0 2674 if (is_write) {
3a7d929e 2675 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2676 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2677 /* XXX: could force cpu_single_env to NULL to avoid
2678 potential bugs */
13eb76e0 2679 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2680 /* 32 bit write access */
c27004ec 2681 val = ldl_p(buf);
a4193c8a 2682 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2683 l = 4;
2684 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2685 /* 16 bit write access */
c27004ec 2686 val = lduw_p(buf);
a4193c8a 2687 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2688 l = 2;
2689 } else {
1c213d19 2690 /* 8 bit write access */
c27004ec 2691 val = ldub_p(buf);
a4193c8a 2692 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2693 l = 1;
2694 }
2695 } else {
b448f2f3
FB
2696 unsigned long addr1;
2697 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2698 /* RAM case */
b448f2f3 2699 ptr = phys_ram_base + addr1;
13eb76e0 2700 memcpy(ptr, buf, l);
3a7d929e
FB
2701 if (!cpu_physical_memory_is_dirty(addr1)) {
2702 /* invalidate code */
2703 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2704 /* set dirty bit */
5fafdf24 2705 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2706 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2707 }
13eb76e0
FB
2708 }
2709 } else {
5fafdf24 2710 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2711 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2712 /* I/O case */
2713 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2714 if (l >= 4 && ((addr & 3) == 0)) {
2715 /* 32 bit read access */
a4193c8a 2716 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2717 stl_p(buf, val);
13eb76e0
FB
2718 l = 4;
2719 } else if (l >= 2 && ((addr & 1) == 0)) {
2720 /* 16 bit read access */
a4193c8a 2721 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2722 stw_p(buf, val);
13eb76e0
FB
2723 l = 2;
2724 } else {
1c213d19 2725 /* 8 bit read access */
a4193c8a 2726 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2727 stb_p(buf, val);
13eb76e0
FB
2728 l = 1;
2729 }
2730 } else {
2731 /* RAM case */
5fafdf24 2732 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2733 (addr & ~TARGET_PAGE_MASK);
2734 memcpy(buf, ptr, l);
2735 }
2736 }
2737 len -= l;
2738 buf += l;
2739 addr += l;
2740 }
2741}
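/* A minimal usage sketch (not part of this file): a DMA-style copy between two
   guest physical ranges can be built on cpu_physical_memory_rw(), reading with
   is_write = 0 and writing with is_write = 1. The function name and buffer
   size are invented for the example. */
static void example_dma_copy(target_phys_addr_t dst, target_phys_addr_t src,
                             int len)
{
    uint8_t buf[64];
    int l;

    while (len > 0) {
        l = len;
        if (l > (int)sizeof(buf))
            l = sizeof(buf);
        cpu_physical_memory_rw(src, buf, l, 0);     /* read from guest RAM/IO */
        cpu_physical_memory_rw(dst, buf, l, 1);     /* write back out */
        src += l;
        dst += l;
        len -= l;
    }
}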
8df1cd07 2742
d0ecd2aa 2743/* used for ROM loading: can write in RAM and ROM */
5fafdf24 2744void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2745 const uint8_t *buf, int len)
2746{
2747 int l;
2748 uint8_t *ptr;
2749 target_phys_addr_t page;
2750 unsigned long pd;
2751 PhysPageDesc *p;
3b46e624 2752
d0ecd2aa
FB
2753 while (len > 0) {
2754 page = addr & TARGET_PAGE_MASK;
2755 l = (page + TARGET_PAGE_SIZE) - addr;
2756 if (l > len)
2757 l = len;
2758 p = phys_page_find(page >> TARGET_PAGE_BITS);
2759 if (!p) {
2760 pd = IO_MEM_UNASSIGNED;
2761 } else {
2762 pd = p->phys_offset;
2763 }
3b46e624 2764
d0ecd2aa 2765 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2766 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2767 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2768 /* do nothing */
2769 } else {
2770 unsigned long addr1;
2771 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2772 /* ROM/RAM case */
2773 ptr = phys_ram_base + addr1;
2774 memcpy(ptr, buf, l);
2775 }
2776 len -= l;
2777 buf += l;
2778 addr += l;
2779 }
2780}
2781
2782
8df1cd07
FB
2783/* warning: addr must be aligned */
2784uint32_t ldl_phys(target_phys_addr_t addr)
2785{
2786 int io_index;
2787 uint8_t *ptr;
2788 uint32_t val;
2789 unsigned long pd;
2790 PhysPageDesc *p;
2791
2792 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2793 if (!p) {
2794 pd = IO_MEM_UNASSIGNED;
2795 } else {
2796 pd = p->phys_offset;
2797 }
3b46e624 2798
5fafdf24 2799 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2800 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2801 /* I/O case */
2802 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2803 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2804 } else {
2805 /* RAM case */
5fafdf24 2806 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2807 (addr & ~TARGET_PAGE_MASK);
2808 val = ldl_p(ptr);
2809 }
2810 return val;
2811}
2812
84b7b8e7
FB
2813/* warning: addr must be aligned */
2814uint64_t ldq_phys(target_phys_addr_t addr)
2815{
2816 int io_index;
2817 uint8_t *ptr;
2818 uint64_t val;
2819 unsigned long pd;
2820 PhysPageDesc *p;
2821
2822 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2823 if (!p) {
2824 pd = IO_MEM_UNASSIGNED;
2825 } else {
2826 pd = p->phys_offset;
2827 }
3b46e624 2828
2a4188a3
FB
2829 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2830 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2831 /* I/O case */
2832 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2833#ifdef TARGET_WORDS_BIGENDIAN
2834 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2835 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2836#else
2837 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2838 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2839#endif
2840 } else {
2841 /* RAM case */
5fafdf24 2842 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2843 (addr & ~TARGET_PAGE_MASK);
2844 val = ldq_p(ptr);
2845 }
2846 return val;
2847}
2848
aab33094
FB
2849/* XXX: optimize */
2850uint32_t ldub_phys(target_phys_addr_t addr)
2851{
2852 uint8_t val;
2853 cpu_physical_memory_read(addr, &val, 1);
2854 return val;
2855}
2856
2857/* XXX: optimize */
2858uint32_t lduw_phys(target_phys_addr_t addr)
2859{
2860 uint16_t val;
2861 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2862 return tswap16(val);
2863}
2864
8df1cd07
FB
2865/* warning: addr must be aligned. The ram page is not masked as dirty
2866 and the code inside is not invalidated. It is useful if the dirty
2867 bits are used to track modified PTEs */
2868void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2869{
2870 int io_index;
2871 uint8_t *ptr;
2872 unsigned long pd;
2873 PhysPageDesc *p;
2874
2875 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2876 if (!p) {
2877 pd = IO_MEM_UNASSIGNED;
2878 } else {
2879 pd = p->phys_offset;
2880 }
3b46e624 2881
3a7d929e 2882 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2883 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2884 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2885 } else {
5fafdf24 2886 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2887 (addr & ~TARGET_PAGE_MASK);
2888 stl_p(ptr, val);
2889 }
2890}
2891
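/* A minimal usage sketch (not part of this file): target MMU helpers that set
   accessed/dirty bits in a guest page table entry are assumed to use the
   _notdirty variant so the store does not mark the page as containing modified
   code. The bit value 0x20 is an arbitrary placeholder. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    pte |= 0x20;                        /* placeholder "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}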
bc98a7ef
JM
2892void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2893{
2894 int io_index;
2895 uint8_t *ptr;
2896 unsigned long pd;
2897 PhysPageDesc *p;
2898
2899 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2900 if (!p) {
2901 pd = IO_MEM_UNASSIGNED;
2902 } else {
2903 pd = p->phys_offset;
2904 }
3b46e624 2905
bc98a7ef
JM
2906 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2907 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2908#ifdef TARGET_WORDS_BIGENDIAN
2909 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2910 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2911#else
2912 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2913 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2914#endif
2915 } else {
5fafdf24 2916 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2917 (addr & ~TARGET_PAGE_MASK);
2918 stq_p(ptr, val);
2919 }
2920}
2921
8df1cd07 2922/* warning: addr must be aligned */
8df1cd07
FB
2923void stl_phys(target_phys_addr_t addr, uint32_t val)
2924{
2925 int io_index;
2926 uint8_t *ptr;
2927 unsigned long pd;
2928 PhysPageDesc *p;
2929
2930 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2931 if (!p) {
2932 pd = IO_MEM_UNASSIGNED;
2933 } else {
2934 pd = p->phys_offset;
2935 }
3b46e624 2936
3a7d929e 2937 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2938 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2939 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2940 } else {
2941 unsigned long addr1;
2942 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2943 /* RAM case */
2944 ptr = phys_ram_base + addr1;
2945 stl_p(ptr, val);
3a7d929e
FB
2946 if (!cpu_physical_memory_is_dirty(addr1)) {
2947 /* invalidate code */
2948 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2949 /* set dirty bit */
f23db169
FB
2950 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2951 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2952 }
8df1cd07
FB
2953 }
2954}
2955
aab33094
FB
2956/* XXX: optimize */
2957void stb_phys(target_phys_addr_t addr, uint32_t val)
2958{
2959 uint8_t v = val;
2960 cpu_physical_memory_write(addr, &v, 1);
2961}
2962
2963/* XXX: optimize */
2964void stw_phys(target_phys_addr_t addr, uint32_t val)
2965{
2966 uint16_t v = tswap16(val);
2967 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2968}
2969
2970/* XXX: optimize */
2971void stq_phys(target_phys_addr_t addr, uint64_t val)
2972{
2973 val = tswap64(val);
2974 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2975}
2976
13eb76e0
FB
2977#endif
2978
2979/* virtual memory access for debug */
5fafdf24 2980int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2981 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2982{
2983 int l;
9b3c35e0
JM
2984 target_phys_addr_t phys_addr;
2985 target_ulong page;
13eb76e0
FB
2986
2987 while (len > 0) {
2988 page = addr & TARGET_PAGE_MASK;
2989 phys_addr = cpu_get_phys_page_debug(env, page);
2990 /* if no physical page mapped, return an error */
2991 if (phys_addr == -1)
2992 return -1;
2993 l = (page + TARGET_PAGE_SIZE) - addr;
2994 if (l > len)
2995 l = len;
5fafdf24 2996 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2997 buf, l, is_write);
13eb76e0
FB
2998 len -= l;
2999 buf += l;
3000 addr += l;
3001 }
3002 return 0;
3003}
3004
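/* A minimal usage sketch (not part of this file): a debugger stub reading a
   32 bit value from a guest virtual address can go through
   cpu_memory_rw_debug(), which translates page by page and falls back to the
   physical access path. The function name is invented for the example. */
static int example_read_guest_u32(CPUState *env, target_ulong addr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0)
        return -1;
    *out = ldl_p(buf);
    return 0;
}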
2e70f6ef
PB
3005/* in deterministic execution mode, instructions doing device I/Os
3006 must be at the end of the TB */
3007void cpu_io_recompile(CPUState *env, void *retaddr)
3008{
3009 TranslationBlock *tb;
3010 uint32_t n, cflags;
3011 target_ulong pc, cs_base;
3012 uint64_t flags;
3013
3014 tb = tb_find_pc((unsigned long)retaddr);
3015 if (!tb) {
3016 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3017 retaddr);
3018 }
3019 n = env->icount_decr.u16.low + tb->icount;
3020 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3021 /* Calculate how many instructions had been executed before the fault
3022 occurred. */
3023 n = n - env->icount_decr.u16.low;
3024 /* Generate a new TB ending on the I/O insn. */
3025 n++;
3026 /* On MIPS and SH, delay slot instructions can only be restarted if
3027 they were already the first instruction in the TB. If this is not
3028 the first instruction in a TB then re-execute the preceding
3029 branch. */
3030#if defined(TARGET_MIPS)
3031 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3032 env->active_tc.PC -= 4;
3033 env->icount_decr.u16.low++;
3034 env->hflags &= ~MIPS_HFLAG_BMASK;
3035 }
3036#elif defined(TARGET_SH4)
3037 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3038 && n > 1) {
3039 env->pc -= 2;
3040 env->icount_decr.u16.low++;
3041 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3042 }
3043#endif
3044 /* This should never happen. */
3045 if (n > CF_COUNT_MASK)
3046 cpu_abort(env, "TB too big during recompile");
3047
3048 cflags = n | CF_LAST_IO;
3049 pc = tb->pc;
3050 cs_base = tb->cs_base;
3051 flags = tb->flags;
3052 tb_phys_invalidate(tb, -1);
3053 /* FIXME: In theory this could raise an exception. In practice
3054 we have already translated the block once so it's probably ok. */
3055 tb_gen_code(env, pc, cs_base, flags, cflags);
3056 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3057 the first in the TB) then we end up generating a whole new TB and
3058 repeating the fault, which is horribly inefficient.
3059 Better would be to execute just this insn uncached, or generate a
3060 second new TB. */
3061 cpu_resume_from_signal(env, NULL);
3062}
3063
e3db7226
FB
3064void dump_exec_info(FILE *f,
3065 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3066{
3067 int i, target_code_size, max_target_code_size;
3068 int direct_jmp_count, direct_jmp2_count, cross_page;
3069 TranslationBlock *tb;
3b46e624 3070
e3db7226
FB
3071 target_code_size = 0;
3072 max_target_code_size = 0;
3073 cross_page = 0;
3074 direct_jmp_count = 0;
3075 direct_jmp2_count = 0;
3076 for(i = 0; i < nb_tbs; i++) {
3077 tb = &tbs[i];
3078 target_code_size += tb->size;
3079 if (tb->size > max_target_code_size)
3080 max_target_code_size = tb->size;
3081 if (tb->page_addr[1] != -1)
3082 cross_page++;
3083 if (tb->tb_next_offset[0] != 0xffff) {
3084 direct_jmp_count++;
3085 if (tb->tb_next_offset[1] != 0xffff) {
3086 direct_jmp2_count++;
3087 }
3088 }
3089 }
3090 /* XXX: avoid using doubles ? */
57fec1fe 3091 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3092 cpu_fprintf(f, "gen code size %ld/%ld\n",
3093 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3094 cpu_fprintf(f, "TB count %d/%d\n",
3095 nb_tbs, code_gen_max_blocks);
5fafdf24 3096 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3097 nb_tbs ? target_code_size / nb_tbs : 0,
3098 max_target_code_size);
5fafdf24 3099 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3100 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3101 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3102 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3103 cross_page,
e3db7226
FB
3104 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3105 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3106 direct_jmp_count,
e3db7226
FB
3107 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3108 direct_jmp2_count,
3109 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3110 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3111 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3112 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3113 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3114 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3115}
3116
5fafdf24 3117#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3118
3119#define MMUSUFFIX _cmmu
3120#define GETPC() NULL
3121#define env cpu_single_env
b769d8fe 3122#define SOFTMMU_CODE_ACCESS
61382a50
FB
3123
3124#define SHIFT 0
3125#include "softmmu_template.h"
3126
3127#define SHIFT 1
3128#include "softmmu_template.h"
3129
3130#define SHIFT 2
3131#include "softmmu_template.h"
3132
3133#define SHIFT 3
3134#include "softmmu_template.h"
3135
3136#undef env
3137
3138#endif