/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

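/* Illustrative sketch (editorial addition, not in the original source):
   given the encoding described above, a phys_offset value splits into its
   two parts with the page mask, mirroring the (pd & TARGET_PAGE_MASK) and
   (pd & ~TARGET_PAGE_MASK) tests used throughout this file:

       ram_addr_t pd = p->phys_offset;
       ram_addr_t ram_off = pd & TARGET_PAGE_MASK;      // offset into phys_ram_base
       unsigned long io_bits = pd & ~TARGET_PAGE_MASK;  // IO_MEM_* index bits
*/
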
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

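/* Illustrative sketch (editorial addition, not in the original source):
   after page_init() has run, qemu_host_page_mask rounds any address down
   to its containing host page, as tb_alloc_page() does further below:

       target_ulong host_page = addr & qemu_host_page_mask;

   where 'addr' is a hypothetical guest address. */
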
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

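/* Illustrative sketch (editorial addition, not in the original source):
   a typical descriptor lookup, as done for example by
   breakpoint_invalidate() later in this file; 'paddr' is a hypothetical
   target_phys_addr_t:

       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

   Because phys_page_find() passes alloc=0, it never allocates and may
   return NULL for unmapped pages. */
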
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

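/* Illustrative sketch (editorial addition, not in the original source):
   the page and jump lists above store a small tag in the low two bits of
   each pointer, which is safe because TranslationBlock pointers are at
   least 4-byte aligned. Decoding an entry therefore looks like:

       unsigned int n = (long)tb & 3;             // tag: page index 0 or 1
       tb = (TranslationBlock *)((long)tb & ~3);  // real pointer
       TranslationBlock *next = tb->page_next[n];

   The tag value 2 is reserved as the end-of-list marker in the jmp lists,
   as seen in the 'n1 == 2' tests throughout this file. */
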
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

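/* Illustrative sketch (editorial addition, not in the original source):
   set_bits() marks a run of bits in a byte-oriented bitmap;
   build_page_bitmap() below uses it to record which bytes of a guest page
   hold translated code. For example:

       uint8_t bitmap[TARGET_PAGE_SIZE / 8];
       memset(bitmap, 0, sizeof(bitmap));
       set_bits(bitmap, 10, 5);   // sets bits 10..14 inclusive

   One bit corresponds to one guest byte within the page. */
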
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

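/* Illustrative sketch (editorial addition, not in the original source):
   tb_find_pc() maps a host code address back to its TranslationBlock by
   binary search, relying on tbs[] being allocated in ascending tc_ptr
   order. The SMC handling above uses it like this, where 'host_pc' is a
   hypothetical faulting host address inside generated code:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc, NULL);
*/
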
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif

    /* FIXME: This is probably not threadsafe. A different thread could
       be in the middle of a read-modify-write operation. */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
#else
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

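/* Illustrative sketch (editorial addition, not in the original source):
   typical use of the log mask parser, e.g. for a hypothetical
   "in_asm,op" item string coming from the command line:

       int mask = cpu_str_to_log_mask("in_asm,op");
       if (!mask)
           fprintf(stderr, "bad log item\n");
       else
           cpu_set_log(mask);

   The special name "all" enables every item in cpu_log_items[]. */
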
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

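/* Illustrative sketch (editorial addition, not in the original source):
   the direct-mapped TLB index used above (and again in tlb_set_page_exec()
   below) is derived from the virtual page number:

       i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       CPUTLBEntry *e = &env->tlb_table[mmu_idx][i];

   so each page maps to exactly one entry per MMU mode, and flushing a page
   only requires clearing that single index in each mode's table. */
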
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

1750/* update the TLB corresponding to virtual page vaddr and phys addr
1751 addr so that it is no longer dirty */
6a00d601
FB
1752static inline void tlb_set_dirty(CPUState *env,
1753 unsigned long addr, target_ulong vaddr)
1ccde1cb 1754{
1ccde1cb
FB
1755 int i;
1756
1ccde1cb
FB
1757 addr &= TARGET_PAGE_MASK;
1758 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1759 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1760 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1761#if (NB_MMU_MODES >= 3)
1762 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1763#if (NB_MMU_MODES == 4)
1764 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1765#endif
1766#endif
9fa3e853
FB
1767}
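/* [Editorial sketch, not part of exec.c] The direct-mapped TLB indexing
   used by tlb_set_dirty() above and tlb_set_page_exec() below: the entry
   slot is just the low bits of the virtual page number.  Self-contained
   demo; the constants are illustrative stand-ins for the target-dependent
   TARGET_PAGE_BITS and CPU_TLB_SIZE. */
#include <stdint.h>
#include <stdio.h>

#define EX_TARGET_PAGE_BITS 12   /* assumed 4 KB target pages */
#define EX_CPU_TLB_SIZE     256  /* assumed; must be a power of two */

int main(void)
{
    uint32_t vaddr = 0x0804a123;
    /* drop the in-page offset, keep the low bits of the page number */
    unsigned idx = (vaddr >> EX_TARGET_PAGE_BITS) & (EX_CPU_TLB_SIZE - 1);
    printf("vaddr 0x%08x -> TLB slot %u\n", vaddr, idx);   /* slot 74 */
    return 0;
}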
1768
59817ccb
FB
1769/* add a new TLB entry. At most one entry for a given virtual address
1770 is permitted. Return 0 if OK or 2 if the page could not be mapped
1771 (can only happen in non-SOFTMMU mode for I/O pages or pages
1772 conflicting with the host address space). */
5fafdf24
TS
1773int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1774 target_phys_addr_t paddr, int prot,
6ebbf390 1775 int mmu_idx, int is_softmmu)
9fa3e853 1776{
92e873b9 1777 PhysPageDesc *p;
4f2ac237 1778 unsigned long pd;
9fa3e853 1779 unsigned int index;
4f2ac237 1780 target_ulong address;
108c49b8 1781 target_phys_addr_t addend;
9fa3e853 1782 int ret;
84b7b8e7 1783 CPUTLBEntry *te;
6658ffb8 1784 int i;
9fa3e853 1785
92e873b9 1786 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1787 if (!p) {
1788 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1789 } else {
1790 pd = p->phys_offset;
9fa3e853
FB
1791 }
1792#if defined(DEBUG_TLB)
6ebbf390
JM
1793 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1794 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1795#endif
1796
1797 ret = 0;
1798#if !defined(CONFIG_SOFTMMU)
5fafdf24 1799 if (is_softmmu)
9fa3e853
FB
1800#endif
1801 {
2a4188a3 1802 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1803 /* IO memory case */
1804 address = vaddr | pd;
1805 addend = paddr;
1806 } else {
1807 /* standard memory */
1808 address = vaddr;
1809 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1810 }
6658ffb8
PB
1811
1812 /* Make accesses to pages with watchpoints go via the
1813 watchpoint trap routines. */
1814 for (i = 0; i < env->nb_watchpoints; i++) {
1815 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1816 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1817 env->watchpoint[i].addend = 0;
6658ffb8
PB
1818 address = vaddr | io_mem_watch;
1819 } else {
d79acba4
AZ
1820 env->watchpoint[i].addend = pd - paddr +
1821 (unsigned long) phys_ram_base;
6658ffb8
PB
1822 /* TODO: Figure out how to make read watchpoints coexist
1823 with code. */
1824 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1825 }
1826 }
1827 }
d79acba4 1828
90f18422 1829 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1830 addend -= vaddr;
6ebbf390 1831 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1832 te->addend = addend;
67b915a5 1833 if (prot & PAGE_READ) {
84b7b8e7
FB
1834 te->addr_read = address;
1835 } else {
1836 te->addr_read = -1;
1837 }
5c751e99 1838
84b7b8e7
FB
1839 if (prot & PAGE_EXEC) {
1840 te->addr_code = address;
9fa3e853 1841 } else {
84b7b8e7 1842 te->addr_code = -1;
9fa3e853 1843 }
67b915a5 1844 if (prot & PAGE_WRITE) {
5fafdf24 1845 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1846 (pd & IO_MEM_ROMD)) {
1847 /* write access calls the I/O callback */
5fafdf24 1848 te->addr_write = vaddr |
856074ec 1849 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1850 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1851 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1852 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1853 } else {
84b7b8e7 1854 te->addr_write = address;
9fa3e853
FB
1855 }
1856 } else {
84b7b8e7 1857 te->addr_write = -1;
9fa3e853
FB
1858 }
1859 }
1860#if !defined(CONFIG_SOFTMMU)
1861 else {
1862 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1863 /* IO access: no mapping is done as it will be handled by the
1864 soft MMU */
1865 if (!(env->hflags & HF_SOFTMMU_MASK))
1866 ret = 2;
1867 } else {
1868 void *map_addr;
59817ccb
FB
1869
1870 if (vaddr >= MMAP_AREA_END) {
1871 ret = 2;
1872 } else {
1873 if (prot & PROT_WRITE) {
5fafdf24 1874 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1875#if defined(TARGET_HAS_SMC) || 1
59817ccb 1876 first_tb ||
d720b93d 1877#endif
5fafdf24 1878 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1879 !cpu_physical_memory_is_dirty(pd))) {
1880 /* ROM: behave as if code were inside */
1881 /* if code is present, we map read-only and save the
1882 original mapping */
1883 VirtPageDesc *vp;
3b46e624 1884
90f18422 1885 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1886 vp->phys_addr = pd;
1887 vp->prot = prot;
1888 vp->valid_tag = virt_valid_tag;
1889 prot &= ~PAGE_WRITE;
1890 }
1891 }
5fafdf24 1892 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1893 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1894 if (map_addr == MAP_FAILED) {
1895 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1896 paddr, vaddr);
9fa3e853 1897 }
9fa3e853
FB
1898 }
1899 }
1900 }
1901#endif
1902 return ret;
1903}
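/* [Editorial sketch, not part of exec.c] How a target's MMU fault handler
   typically feeds tlb_set_page_exec() after a successful page-table walk.
   Assumes the usual cpu.h/exec-all.h declarations are in scope; the
   physical address and prot bits are invented for illustration. */
static int example_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    target_phys_addr_t paddr = 0x00100000;  /* result of the walk (made up) */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* at most one entry per virtual address: a later call with the same
       vaddr simply overwrites the same direct-mapped slot */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, 1 /* is_softmmu */);
}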
1904
1905/* called from signal handler: invalidate the code and unprotect the
1906 page. Return TRUE if the fault was successfully handled. */
53a5960a 1907int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1908{
1909#if !defined(CONFIG_SOFTMMU)
1910 VirtPageDesc *vp;
1911
1912#if defined(DEBUG_TLB)
1913 printf("page_unprotect: addr=0x%08x\n", addr);
1914#endif
1915 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1916
1917 /* if it is not mapped, no need to worry here */
1918 if (addr >= MMAP_AREA_END)
1919 return 0;
9fa3e853
FB
1920 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1921 if (!vp)
1922 return 0;
1923 /* NOTE: in this case, valid_tag is _not_ tested as it
1924 validates only the code TLB */
1925 if (vp->valid_tag != virt_valid_tag)
1926 return 0;
1927 if (!(vp->prot & PAGE_WRITE))
1928 return 0;
1929#if defined(DEBUG_TLB)
5fafdf24 1930 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1931 addr, vp->phys_addr, vp->prot);
1932#endif
59817ccb
FB
1933 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1934 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1935 (unsigned long)addr, vp->prot);
d720b93d 1936 /* set the dirty bit */
0a962c02 1937 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1938 /* flush the code inside */
1939 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1940 return 1;
1941#else
1942 return 0;
1943#endif
33417e70
FB
1944}
1945
0124311e
FB
1946#else
1947
ee8b7021 1948void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1949{
1950}
1951
2e12669a 1952void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1953{
1954}
1955
5fafdf24
TS
1956int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1957 target_phys_addr_t paddr, int prot,
6ebbf390 1958 int mmu_idx, int is_softmmu)
9fa3e853
FB
1959{
1960 return 0;
1961}
0124311e 1962
9fa3e853
FB
1963/* dump memory mappings */
1964void page_dump(FILE *f)
33417e70 1965{
9fa3e853
FB
1966 unsigned long start, end;
1967 int i, j, prot, prot1;
1968 PageDesc *p;
33417e70 1969
9fa3e853
FB
1970 fprintf(f, "%-8s %-8s %-8s %s\n",
1971 "start", "end", "size", "prot");
1972 start = -1;
1973 end = -1;
1974 prot = 0;
1975 for(i = 0; i <= L1_SIZE; i++) {
1976 if (i < L1_SIZE)
1977 p = l1_map[i];
1978 else
1979 p = NULL;
1980 for(j = 0;j < L2_SIZE; j++) {
1981 if (!p)
1982 prot1 = 0;
1983 else
1984 prot1 = p[j].flags;
1985 if (prot1 != prot) {
1986 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1987 if (start != -1) {
1988 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1989 start, end, end - start,
9fa3e853
FB
1990 prot & PAGE_READ ? 'r' : '-',
1991 prot & PAGE_WRITE ? 'w' : '-',
1992 prot & PAGE_EXEC ? 'x' : '-');
1993 }
1994 if (prot1 != 0)
1995 start = end;
1996 else
1997 start = -1;
1998 prot = prot1;
1999 }
2000 if (!p)
2001 break;
2002 }
33417e70 2003 }
33417e70
FB
2004}
2005
53a5960a 2006int page_get_flags(target_ulong address)
33417e70 2007{
9fa3e853
FB
2008 PageDesc *p;
2009
2010 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2011 if (!p)
9fa3e853
FB
2012 return 0;
2013 return p->flags;
2014}
2015
2016/* modify the flags of a page and invalidate the code if
2017 necessary. The flag PAGE_WRITE_ORG is set automatically
2018 based on PAGE_WRITE */
53a5960a 2019void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2020{
2021 PageDesc *p;
53a5960a 2022 target_ulong addr;
9fa3e853 2023
c8a706fe 2024 /* mmap_lock should already be held. */
9fa3e853
FB
2025 start = start & TARGET_PAGE_MASK;
2026 end = TARGET_PAGE_ALIGN(end);
2027 if (flags & PAGE_WRITE)
2028 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2029 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2030 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2031 /* if the write protection is set, then we invalidate the code
2032 inside */
5fafdf24 2033 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2034 (flags & PAGE_WRITE) &&
2035 p->first_tb) {
d720b93d 2036 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2037 }
2038 p->flags = flags;
2039 }
33417e70
FB
2040}
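/* [Editorial sketch, not part of exec.c] Typical user-mode caller: after
   emulating a guest mmap()/mprotect(), the range is (re)flagged so that
   writes into pages holding translated code can be trapped.  PAGE_WRITE_ORG
   remembers the real permission while PAGE_WRITE may be temporarily dropped
   by the TB write-protection machinery.  Addresses are made up. */
static void example_after_guest_mmap(void)
{
    /* mmap_lock() is expected to be held, as noted in the function above */
    page_set_flags(0x40000000, 0x40004000,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}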
2041
3d97b40b
TS
2042int page_check_range(target_ulong start, target_ulong len, int flags)
2043{
2044 PageDesc *p;
2045 target_ulong end;
2046 target_ulong addr;
2047
2048 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2049 start = start & TARGET_PAGE_MASK;
2050
2051 if( end < start )
2052 /* we've wrapped around */
2053 return -1;
2054 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2055 p = page_find(addr >> TARGET_PAGE_BITS);
2056 if( !p )
2057 return -1;
2058 if( !(p->flags & PAGE_VALID) )
2059 return -1;
2060
dae3270c 2061 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2062 return -1;
dae3270c
FB
2063 if (flags & PAGE_WRITE) {
2064 if (!(p->flags & PAGE_WRITE_ORG))
2065 return -1;
2066 /* unprotect the page if it was put read-only because it
2067 contains translated code */
2068 if (!(p->flags & PAGE_WRITE)) {
2069 if (!page_unprotect(addr, 0, NULL))
2070 return -1;
2071 }
2072 return 0;
2073 }
3d97b40b
TS
2074 }
2075 return 0;
2076}
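/* [Editorial sketch, not part of exec.c] How syscall emulation can validate
   a guest buffer before copying from it.  Note that the PAGE_WRITE branch
   above returns success from inside the per-page loop, i.e. after checking
   (and possibly unprotecting) only the first page of the range. */
static int example_copy_from_guest_ok(target_ulong guest_addr,
                                      target_ulong len)
{
    /* page_check_range() returns -1 on wrap-around, unmapped pages or
       insufficient protection */
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}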
2077
9fa3e853
FB
2078/* called from signal handler: invalidate the code and unprotect the
2079 page. Return TRUE if the fault was successfully handled. */
53a5960a 2080int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2081{
2082 unsigned int page_index, prot, pindex;
2083 PageDesc *p, *p1;
53a5960a 2084 target_ulong host_start, host_end, addr;
9fa3e853 2085
c8a706fe
PB
2086 /* Technically this isn't safe inside a signal handler. However we
2087 know this only ever happens in a synchronous SEGV handler, so in
2088 practice it seems to be ok. */
2089 mmap_lock();
2090
83fb7adf 2091 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2092 page_index = host_start >> TARGET_PAGE_BITS;
2093 p1 = page_find(page_index);
c8a706fe
PB
2094 if (!p1) {
2095 mmap_unlock();
9fa3e853 2096 return 0;
c8a706fe 2097 }
83fb7adf 2098 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2099 p = p1;
2100 prot = 0;
2101 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2102 prot |= p->flags;
2103 p++;
2104 }
2105 /* if the page was really writable, then we change its
2106 protection back to writable */
2107 if (prot & PAGE_WRITE_ORG) {
2108 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2109 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2110 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2111 (prot & PAGE_BITS) | PAGE_WRITE);
2112 p1[pindex].flags |= PAGE_WRITE;
2113 /* and since the content will be modified, we must invalidate
2114 the corresponding translated code. */
d720b93d 2115 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2116#ifdef DEBUG_TB_CHECK
2117 tb_invalidate_check(address);
2118#endif
c8a706fe 2119 mmap_unlock();
9fa3e853
FB
2120 return 1;
2121 }
2122 }
c8a706fe 2123 mmap_unlock();
9fa3e853
FB
2124 return 0;
2125}
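/* [Editorial sketch, not part of exec.c] Why the loop above ORs the flags
   of several target pages: mprotect() works at host-page granularity, so
   on a host with pages larger than the target's, one host page covers
   several PageDescs.  Self-contained demo with assumed sizes. */
#include <stdio.h>

int main(void)
{
    unsigned long host_page_size   = 0x4000;       /* assumed 16 KB host */
    unsigned long host_page_mask   = ~(host_page_size - 1);
    unsigned long target_page_size = 0x1000;       /* assumed 4 KB target */
    unsigned long address          = 0x40003123;   /* faulting address */

    unsigned long host_start = address & host_page_mask;
    printf("host page 0x%lx..0x%lx spans %lu target pages\n",
           host_start, host_start + host_page_size - 1,
           host_page_size / target_page_size);     /* prints 4 */
    return 0;
}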
2126
6a00d601
FB
2127static inline void tlb_set_dirty(CPUState *env,
2128 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2129{
2130}
9fa3e853
FB
2131#endif /* defined(CONFIG_USER_ONLY) */
2132
db7b5426 2133static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2134 ram_addr_t memory);
2135static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2136 ram_addr_t orig_memory);
db7b5426
BS
2137#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2138 need_subpage) \
2139 do { \
2140 if (addr > start_addr) \
2141 start_addr2 = 0; \
2142 else { \
2143 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2144 if (start_addr2 > 0) \
2145 need_subpage = 1; \
2146 } \
2147 \
49e9fba2 2148 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2149 end_addr2 = TARGET_PAGE_SIZE - 1; \
2150 else { \
2151 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2152 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2153 need_subpage = 1; \
2154 } \
2155 } while (0)
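/* [Editorial note] CHECK_SUBPAGE also reads 'orig_size' from the caller's
   scope even though it is not in the parameter list.  The self-contained
   demo below reproduces its boundary math for a registration that does not
   cover a whole page (assumed 4 KB target pages, made-up addresses). */
#include <stdio.h>

#define EX_PAGE_SIZE 0x1000ul
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
    unsigned long start_addr = 0x10000100;   /* unaligned MMIO region */
    unsigned long orig_size  = 0x200;
    unsigned long addr       = start_addr & EX_PAGE_MASK;    /* page base */

    unsigned long start_addr2 = start_addr & ~EX_PAGE_MASK;      /* 0x100 */
    unsigned long end_addr2 =
        (start_addr + orig_size) - addr >= EX_PAGE_SIZE
            ? EX_PAGE_SIZE - 1
            : (start_addr + orig_size - 1) & ~EX_PAGE_MASK;      /* 0x2ff */

    printf("subpage handlers cover 0x%03lx..0x%03lx of the page\n",
           start_addr2, end_addr2);
    return 0;
}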
2156
33417e70
FB
2157/* register physical memory. 'size' must be a multiple of the target
2158 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2159 io memory page */
5fafdf24 2160void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2161 ram_addr_t size,
2162 ram_addr_t phys_offset)
33417e70 2163{
108c49b8 2164 target_phys_addr_t addr, end_addr;
92e873b9 2165 PhysPageDesc *p;
9d42037b 2166 CPUState *env;
00f82b8a 2167 ram_addr_t orig_size = size;
db7b5426 2168 void *subpage;
33417e70 2169
da260249
FB
2170#ifdef USE_KQEMU
2171 /* XXX: should not depend on cpu context */
2172 env = first_cpu;
2173 if (env->kqemu_enabled) {
2174 kqemu_set_phys_mem(start_addr, size, phys_offset);
2175 }
2176#endif
5fd386f6 2177 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2178 end_addr = start_addr + (target_phys_addr_t)size;
2179 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2181 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2182 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2183 target_phys_addr_t start_addr2, end_addr2;
2184 int need_subpage = 0;
2185
2186 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2187 need_subpage);
4254fab8 2188 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2189 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2190 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2191 &p->phys_offset, orig_memory);
2192 } else {
2193 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2194 >> IO_MEM_SHIFT];
2195 }
2196 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2197 } else {
2198 p->phys_offset = phys_offset;
2199 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2200 (phys_offset & IO_MEM_ROMD))
2201 phys_offset += TARGET_PAGE_SIZE;
2202 }
2203 } else {
2204 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2205 p->phys_offset = phys_offset;
2206 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2207 (phys_offset & IO_MEM_ROMD))
2208 phys_offset += TARGET_PAGE_SIZE;
2209 else {
2210 target_phys_addr_t start_addr2, end_addr2;
2211 int need_subpage = 0;
2212
2213 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2214 end_addr2, need_subpage);
2215
4254fab8 2216 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2217 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2218 &p->phys_offset, IO_MEM_UNASSIGNED);
2219 subpage_register(subpage, start_addr2, end_addr2,
2220 phys_offset);
2221 }
2222 }
2223 }
33417e70 2224 }
3b46e624 2225
9d42037b
FB
2226 /* since each CPU stores ram addresses in its TLB cache, we must
2227 reset the modified entries */
2228 /* XXX: slow ! */
2229 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2230 tlb_flush(env, 1);
2231 }
33417e70
FB
2232}
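/* [Editorial sketch, not part of exec.c] Typical board-init usage,
   combining this function with qemu_ram_alloc() (defined just below).
   Sizes and guest-physical addresses are invented for illustration. */
static void example_board_init(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    ram_addr_t rom_offset = qemu_ram_alloc(64 * 1024);

    /* the low bits of phys_offset select the handler: plain RAM... */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
    /* ...or ROM, which sends write accesses down the I/O path */
    cpu_register_physical_memory(0xfffe0000, 64 * 1024,
                                 rom_offset | IO_MEM_ROM);
}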
2233
ba863458 2234/* XXX: temporary until new memory mapping API */
00f82b8a 2235ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2236{
2237 PhysPageDesc *p;
2238
2239 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2240 if (!p)
2241 return IO_MEM_UNASSIGNED;
2242 return p->phys_offset;
2243}
2244
e9a1ab19 2245/* XXX: better than nothing */
00f82b8a 2246ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2247{
2248 ram_addr_t addr;
7fb4fdcf 2249 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2250 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
2251 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2252 abort();
2253 }
2254 addr = phys_ram_alloc_offset;
2255 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2256 return addr;
2257}
2258
2259void qemu_ram_free(ram_addr_t addr)
2260{
2261}
2262
a4193c8a 2263static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2264{
67d3b957 2265#ifdef DEBUG_UNASSIGNED
ab3d1727 2266 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2267#endif
2268#ifdef TARGET_SPARC
6c36d3fa 2269 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2270#elif TARGET_CRIS
2271 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2272#endif
33417e70
FB
2273 return 0;
2274}
2275
a4193c8a 2276static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2277{
67d3b957 2278#ifdef DEBUG_UNASSIGNED
ab3d1727 2279 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2280#endif
b4f0a316 2281#ifdef TARGET_SPARC
6c36d3fa 2282 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2283#elif TARGET_CRIS
2284 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2285#endif
33417e70
FB
2286}
2287
2288static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2289 unassigned_mem_readb,
2290 unassigned_mem_readb,
2291 unassigned_mem_readb,
2292};
2293
2294static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2295 unassigned_mem_writeb,
2296 unassigned_mem_writeb,
2297 unassigned_mem_writeb,
2298};
2299
3a7d929e 2300static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2301{
3a7d929e
FB
2302 unsigned long ram_addr;
2303 int dirty_flags;
2304 ram_addr = addr - (unsigned long)phys_ram_base;
2305 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2306 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2307#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2308 tb_invalidate_phys_page_fast(ram_addr, 1);
2309 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2310#endif
3a7d929e 2311 }
c27004ec 2312 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2313#ifdef USE_KQEMU
2314 if (cpu_single_env->kqemu_enabled &&
2315 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2316 kqemu_modify_page(cpu_single_env, ram_addr);
2317#endif
f23db169
FB
2318 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2319 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2320 /* we remove the notdirty callback only if the code has been
2321 flushed */
2322 if (dirty_flags == 0xff)
6a00d601 2323 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2324}
2325
3a7d929e 2326static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2327{
3a7d929e
FB
2328 unsigned long ram_addr;
2329 int dirty_flags;
2330 ram_addr = addr - (unsigned long)phys_ram_base;
2331 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2332 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2333#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2334 tb_invalidate_phys_page_fast(ram_addr, 2);
2335 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2336#endif
3a7d929e 2337 }
c27004ec 2338 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2339#ifdef USE_KQEMU
2340 if (cpu_single_env->kqemu_enabled &&
2341 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2342 kqemu_modify_page(cpu_single_env, ram_addr);
2343#endif
f23db169
FB
2344 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2345 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2346 /* we remove the notdirty callback only if the code has been
2347 flushed */
2348 if (dirty_flags == 0xff)
6a00d601 2349 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2350}
2351
3a7d929e 2352static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2353{
3a7d929e
FB
2354 unsigned long ram_addr;
2355 int dirty_flags;
2356 ram_addr = addr - (unsigned long)phys_ram_base;
2357 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2358 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2359#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2360 tb_invalidate_phys_page_fast(ram_addr, 4);
2361 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2362#endif
3a7d929e 2363 }
c27004ec 2364 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2365#ifdef USE_KQEMU
2366 if (cpu_single_env->kqemu_enabled &&
2367 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2368 kqemu_modify_page(cpu_single_env, ram_addr);
2369#endif
f23db169
FB
2370 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2371 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2372 /* we remove the notdirty callback only if the code has been
2373 flushed */
2374 if (dirty_flags == 0xff)
6a00d601 2375 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2376}
2377
3a7d929e 2378static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2379 NULL, /* never used */
2380 NULL, /* never used */
2381 NULL, /* never used */
2382};
2383
1ccde1cb
FB
2384static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2385 notdirty_mem_writeb,
2386 notdirty_mem_writew,
2387 notdirty_mem_writel,
2388};
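/* [Editorial sketch, not part of exec.c] The dirty-byte protocol driven by
   the notdirty_mem_write* handlers above: each target page owns one byte in
   phys_ram_dirty whose bits belong to different consumers (code
   invalidation, VGA refresh, migration...).  The flag value below is an
   assumption (CODE_DIRTY_FLAG is defined in cpu-all.h, not here); the point
   is the 0xff test that finally removes the slow-path callback. */
#include <stdio.h>

#define EX_CODE_DIRTY_FLAG 0x02   /* assumed bit value */

int main(void)
{
    unsigned char dirty = 0x00;               /* page starts fully clean */

    /* a guest write through notdirty_mem_write*: set every bit except
       CODE_DIRTY_FLAG, which only the TB-invalidation code may set */
    dirty |= 0xff & ~EX_CODE_DIRTY_FLAG;
    printf("after guest write: 0x%02x (callback kept)\n", dirty);

    dirty |= EX_CODE_DIRTY_FLAG;              /* translated code flushed */
    if (dirty == 0xff)
        printf("all bits set: notdirty callback can be dropped\n");
    return 0;
}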
2389
6658ffb8
PB
2390#if defined(CONFIG_SOFTMMU)
2391/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2392 so these check for a hit then pass through to the normal out-of-line
2393 phys routines. */
2394static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2395{
2396 return ldub_phys(addr);
2397}
2398
2399static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2400{
2401 return lduw_phys(addr);
2402}
2403
2404static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2405{
2406 return ldl_phys(addr);
2407}
2408
2409/* Generate a debug exception if a watchpoint has been hit.
2410 Returns the real physical address of the access. addr will be a host
d79acba4 2411 address in case of a RAM location. */
6658ffb8
PB
2412static target_ulong check_watchpoint(target_phys_addr_t addr)
2413{
2414 CPUState *env = cpu_single_env;
2415 target_ulong watch;
2416 target_ulong retaddr;
2417 int i;
2418
2419 retaddr = addr;
2420 for (i = 0; i < env->nb_watchpoints; i++) {
2421 watch = env->watchpoint[i].vaddr;
2422 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2423 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2424 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2425 cpu_single_env->watchpoint_hit = i + 1;
2426 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2427 break;
2428 }
2429 }
2430 }
2431 return retaddr;
2432}
2433
2434static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2435 uint32_t val)
2436{
2437 addr = check_watchpoint(addr);
2438 stb_phys(addr, val);
2439}
2440
2441static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2442 uint32_t val)
2443{
2444 addr = check_watchpoint(addr);
2445 stw_phys(addr, val);
2446}
2447
2448static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2449 uint32_t val)
2450{
2451 addr = check_watchpoint(addr);
2452 stl_phys(addr, val);
2453}
2454
2455static CPUReadMemoryFunc *watch_mem_read[3] = {
2456 watch_mem_readb,
2457 watch_mem_readw,
2458 watch_mem_readl,
2459};
2460
2461static CPUWriteMemoryFunc *watch_mem_write[3] = {
2462 watch_mem_writeb,
2463 watch_mem_writew,
2464 watch_mem_writel,
2465};
2466#endif
2467
db7b5426
BS
2468static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2469 unsigned int len)
2470{
db7b5426
BS
2471 uint32_t ret;
2472 unsigned int idx;
2473
2474 idx = SUBPAGE_IDX(addr - mmio->base);
2475#if defined(DEBUG_SUBPAGE)
2476 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2477 mmio, len, addr, idx);
2478#endif
3ee89922 2479 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2480
2481 return ret;
2482}
2483
2484static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2485 uint32_t value, unsigned int len)
2486{
db7b5426
BS
2487 unsigned int idx;
2488
2489 idx = SUBPAGE_IDX(addr - mmio->base);
2490#if defined(DEBUG_SUBPAGE)
2491 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2492 mmio, len, addr, idx, value);
2493#endif
3ee89922 2494 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2495}
2496
2497static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2498{
2499#if defined(DEBUG_SUBPAGE)
2500 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2501#endif
2502
2503 return subpage_readlen(opaque, addr, 0);
2504}
2505
2506static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2507 uint32_t value)
2508{
2509#if defined(DEBUG_SUBPAGE)
2510 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2511#endif
2512 subpage_writelen(opaque, addr, value, 0);
2513}
2514
2515static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2516{
2517#if defined(DEBUG_SUBPAGE)
2518 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2519#endif
2520
2521 return subpage_readlen(opaque, addr, 1);
2522}
2523
2524static void subpage_writew (void *opaque, target_phys_addr_t addr,
2525 uint32_t value)
2526{
2527#if defined(DEBUG_SUBPAGE)
2528 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2529#endif
2530 subpage_writelen(opaque, addr, value, 1);
2531}
2532
2533static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2534{
2535#if defined(DEBUG_SUBPAGE)
2536 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2537#endif
2538
2539 return subpage_readlen(opaque, addr, 2);
2540}
2541
2542static void subpage_writel (void *opaque,
2543 target_phys_addr_t addr, uint32_t value)
2544{
2545#if defined(DEBUG_SUBPAGE)
2546 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2547#endif
2548 subpage_writelen(opaque, addr, value, 2);
2549}
2550
2551static CPUReadMemoryFunc *subpage_read[] = {
2552 &subpage_readb,
2553 &subpage_readw,
2554 &subpage_readl,
2555};
2556
2557static CPUWriteMemoryFunc *subpage_write[] = {
2558 &subpage_writeb,
2559 &subpage_writew,
2560 &subpage_writel,
2561};
2562
2563static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2564 ram_addr_t memory)
db7b5426
BS
2565{
2566 int idx, eidx;
4254fab8 2567 unsigned int i;
db7b5426
BS
2568
2569 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2570 return -1;
2571 idx = SUBPAGE_IDX(start);
2572 eidx = SUBPAGE_IDX(end);
2573#if defined(DEBUG_SUBPAGE)
2574 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2575 mmio, start, end, idx, eidx, memory);
2576#endif
2577 memory >>= IO_MEM_SHIFT;
2578 for (; idx <= eidx; idx++) {
4254fab8 2579 for (i = 0; i < 4; i++) {
3ee89922
BS
2580 if (io_mem_read[memory][i]) {
2581 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2582 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2583 }
2584 if (io_mem_write[memory][i]) {
2585 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2586 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2587 }
4254fab8 2588 }
db7b5426
BS
2589 }
2590
2591 return 0;
2592}
2593
00f82b8a
AJ
2594static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2595 ram_addr_t orig_memory)
db7b5426
BS
2596{
2597 subpage_t *mmio;
2598 int subpage_memory;
2599
2600 mmio = qemu_mallocz(sizeof(subpage_t));
2601 if (mmio != NULL) {
2602 mmio->base = base;
2603 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2604#if defined(DEBUG_SUBPAGE)
2605 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2606 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2607#endif
2608 *phys = subpage_memory | IO_MEM_SUBPAGE;
2609 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2610 }
2611
2612 return mmio;
2613}
2614
33417e70
FB
2615static void io_mem_init(void)
2616{
3a7d929e 2617 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2618 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2619 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2620 io_mem_nb = 5;
2621
6658ffb8
PB
2622#if defined(CONFIG_SOFTMMU)
2623 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2624 watch_mem_write, NULL);
2625#endif
1ccde1cb 2626 /* alloc dirty bits array */
0a962c02 2627 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2628 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2629}
2630
2631/* mem_read and mem_write are arrays of functions containing the
2632 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2633 2). Functions can be omitted with a NULL function pointer. The
2634 registered functions may be modified dynamically later.
2635 If io_index is positive, the corresponding io zone is
4254fab8
BS
2636 modified; otherwise a new io zone is allocated. The return
2637 value can be used with cpu_register_physical_memory(). (-1) is
2638 returned on error. */
33417e70
FB
2639int cpu_register_io_memory(int io_index,
2640 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2641 CPUWriteMemoryFunc **mem_write,
2642 void *opaque)
33417e70 2643{
4254fab8 2644 int i, subwidth = 0;
33417e70
FB
2645
2646 if (io_index <= 0) {
b5ff1b31 2647 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2648 return -1;
2649 io_index = io_mem_nb++;
2650 } else {
2651 if (io_index >= IO_MEM_NB_ENTRIES)
2652 return -1;
2653 }
b5ff1b31 2654
33417e70 2655 for(i = 0;i < 3; i++) {
4254fab8
BS
2656 if (!mem_read[i] || !mem_write[i])
2657 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2658 io_mem_read[io_index][i] = mem_read[i];
2659 io_mem_write[io_index][i] = mem_write[i];
2660 }
a4193c8a 2661 io_mem_opaque[io_index] = opaque;
4254fab8 2662 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2663}
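/* [Editorial sketch, not part of exec.c] Registering a minimal MMIO device
   with this API.  Names, state and the guest address are hypothetical.
   Leaving the byte and word handlers NULL makes the returned value carry
   IO_MEM_SUBWIDTH, so cpu_register_physical_memory() routes the page
   through the subpage machinery above. */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                      /* device register (made up) */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into the device state pointed to by opaque */
}

static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void example_mydev_init(void *dev_state)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}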
61382a50 2664
8926b517
FB
2665CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2666{
2667 return io_mem_write[io_index >> IO_MEM_SHIFT];
2668}
2669
2670CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2671{
2672 return io_mem_read[io_index >> IO_MEM_SHIFT];
2673}
2674
13eb76e0
FB
2675/* physical memory access (slow version, mainly for debug) */
2676#if defined(CONFIG_USER_ONLY)
5fafdf24 2677void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2678 int len, int is_write)
2679{
2680 int l, flags;
2681 target_ulong page;
53a5960a 2682 void * p;
13eb76e0
FB
2683
2684 while (len > 0) {
2685 page = addr & TARGET_PAGE_MASK;
2686 l = (page + TARGET_PAGE_SIZE) - addr;
2687 if (l > len)
2688 l = len;
2689 flags = page_get_flags(page);
2690 if (!(flags & PAGE_VALID))
2691 return;
2692 if (is_write) {
2693 if (!(flags & PAGE_WRITE))
2694 return;
579a97f7 2695 /* XXX: this code should not depend on lock_user */
72fb7daa 2696 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2697 /* FIXME - should this return an error rather than just fail? */
2698 return;
72fb7daa
AJ
2699 memcpy(p, buf, l);
2700 unlock_user(p, addr, l);
13eb76e0
FB
2701 } else {
2702 if (!(flags & PAGE_READ))
2703 return;
579a97f7 2704 /* XXX: this code should not depend on lock_user */
72fb7daa 2705 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2706 /* FIXME - should this return an error rather than just fail? */
2707 return;
72fb7daa 2708 memcpy(buf, p, l);
5b257578 2709 unlock_user(p, addr, 0);
13eb76e0
FB
2710 }
2711 len -= l;
2712 buf += l;
2713 addr += l;
2714 }
2715}
8df1cd07 2716
13eb76e0 2717#else
5fafdf24 2718void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2719 int len, int is_write)
2720{
2721 int l, io_index;
2722 uint8_t *ptr;
2723 uint32_t val;
2e12669a
FB
2724 target_phys_addr_t page;
2725 unsigned long pd;
92e873b9 2726 PhysPageDesc *p;
3b46e624 2727
13eb76e0
FB
2728 while (len > 0) {
2729 page = addr & TARGET_PAGE_MASK;
2730 l = (page + TARGET_PAGE_SIZE) - addr;
2731 if (l > len)
2732 l = len;
92e873b9 2733 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2734 if (!p) {
2735 pd = IO_MEM_UNASSIGNED;
2736 } else {
2737 pd = p->phys_offset;
2738 }
3b46e624 2739
13eb76e0 2740 if (is_write) {
3a7d929e 2741 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2743 /* XXX: could force cpu_single_env to NULL to avoid
2744 potential bugs */
13eb76e0 2745 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2746 /* 32 bit write access */
c27004ec 2747 val = ldl_p(buf);
a4193c8a 2748 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2749 l = 4;
2750 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2751 /* 16 bit write access */
c27004ec 2752 val = lduw_p(buf);
a4193c8a 2753 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2754 l = 2;
2755 } else {
1c213d19 2756 /* 8 bit write access */
c27004ec 2757 val = ldub_p(buf);
a4193c8a 2758 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2759 l = 1;
2760 }
2761 } else {
b448f2f3
FB
2762 unsigned long addr1;
2763 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2764 /* RAM case */
b448f2f3 2765 ptr = phys_ram_base + addr1;
13eb76e0 2766 memcpy(ptr, buf, l);
3a7d929e
FB
2767 if (!cpu_physical_memory_is_dirty(addr1)) {
2768 /* invalidate code */
2769 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2770 /* set dirty bit */
5fafdf24 2771 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2772 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2773 }
13eb76e0
FB
2774 }
2775 } else {
5fafdf24 2776 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2777 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2778 /* I/O case */
2779 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2780 if (l >= 4 && ((addr & 3) == 0)) {
2781 /* 32 bit read access */
a4193c8a 2782 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2783 stl_p(buf, val);
13eb76e0
FB
2784 l = 4;
2785 } else if (l >= 2 && ((addr & 1) == 0)) {
2786 /* 16 bit read access */
a4193c8a 2787 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2788 stw_p(buf, val);
13eb76e0
FB
2789 l = 2;
2790 } else {
1c213d19 2791 /* 8 bit read access */
a4193c8a 2792 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2793 stb_p(buf, val);
13eb76e0
FB
2794 l = 1;
2795 }
2796 } else {
2797 /* RAM case */
5fafdf24 2798 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2799 (addr & ~TARGET_PAGE_MASK);
2800 memcpy(buf, ptr, l);
2801 }
2802 }
2803 len -= l;
2804 buf += l;
2805 addr += l;
2806 }
2807}
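/* [Editorial sketch, not part of exec.c] The usual DMA-style caller.  The
   cpu_physical_memory_read/write wrappers used here are thin macros around
   cpu_physical_memory_rw() in cpu-all.h of this period; the buffer size and
   address are illustrative. */
static void example_dma_roundtrip(target_phys_addr_t guest_paddr)
{
    uint8_t buf[512];

    cpu_physical_memory_read(guest_paddr, buf, sizeof(buf));
    /* ...device model transforms buf here... */
    cpu_physical_memory_write(guest_paddr, buf, sizeof(buf));
}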
8df1cd07 2808
d0ecd2aa 2809/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2810void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2811 const uint8_t *buf, int len)
2812{
2813 int l;
2814 uint8_t *ptr;
2815 target_phys_addr_t page;
2816 unsigned long pd;
2817 PhysPageDesc *p;
3b46e624 2818
d0ecd2aa
FB
2819 while (len > 0) {
2820 page = addr & TARGET_PAGE_MASK;
2821 l = (page + TARGET_PAGE_SIZE) - addr;
2822 if (l > len)
2823 l = len;
2824 p = phys_page_find(page >> TARGET_PAGE_BITS);
2825 if (!p) {
2826 pd = IO_MEM_UNASSIGNED;
2827 } else {
2828 pd = p->phys_offset;
2829 }
3b46e624 2830
d0ecd2aa 2831 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2832 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2833 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2834 /* do nothing */
2835 } else {
2836 unsigned long addr1;
2837 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2838 /* ROM/RAM case */
2839 ptr = phys_ram_base + addr1;
2840 memcpy(ptr, buf, l);
2841 }
2842 len -= l;
2843 buf += l;
2844 addr += l;
2845 }
2846}
2847
2848
8df1cd07
FB
2849/* warning: addr must be aligned */
2850uint32_t ldl_phys(target_phys_addr_t addr)
2851{
2852 int io_index;
2853 uint8_t *ptr;
2854 uint32_t val;
2855 unsigned long pd;
2856 PhysPageDesc *p;
2857
2858 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2859 if (!p) {
2860 pd = IO_MEM_UNASSIGNED;
2861 } else {
2862 pd = p->phys_offset;
2863 }
3b46e624 2864
5fafdf24 2865 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2866 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2867 /* I/O case */
2868 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2869 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2870 } else {
2871 /* RAM case */
5fafdf24 2872 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2873 (addr & ~TARGET_PAGE_MASK);
2874 val = ldl_p(ptr);
2875 }
2876 return val;
2877}
2878
84b7b8e7
FB
2879/* warning: addr must be aligned */
2880uint64_t ldq_phys(target_phys_addr_t addr)
2881{
2882 int io_index;
2883 uint8_t *ptr;
2884 uint64_t val;
2885 unsigned long pd;
2886 PhysPageDesc *p;
2887
2888 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2889 if (!p) {
2890 pd = IO_MEM_UNASSIGNED;
2891 } else {
2892 pd = p->phys_offset;
2893 }
3b46e624 2894
2a4188a3
FB
2895 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2896 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2897 /* I/O case */
2898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2899#ifdef TARGET_WORDS_BIGENDIAN
2900 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2901 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2902#else
2903 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2904 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2905#endif
2906 } else {
2907 /* RAM case */
5fafdf24 2908 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2909 (addr & ~TARGET_PAGE_MASK);
2910 val = ldq_p(ptr);
2911 }
2912 return val;
2913}
2914
aab33094
FB
2915/* XXX: optimize */
2916uint32_t ldub_phys(target_phys_addr_t addr)
2917{
2918 uint8_t val;
2919 cpu_physical_memory_read(addr, &val, 1);
2920 return val;
2921}
2922
2923/* XXX: optimize */
2924uint32_t lduw_phys(target_phys_addr_t addr)
2925{
2926 uint16_t val;
2927 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2928 return tswap16(val);
2929}
2930
8df1cd07
FB
2931/* warning: addr must be aligned. The ram page is not marked as dirty
2932 and the code inside is not invalidated. It is useful if the dirty
2933 bits are used to track modified PTEs */
2934void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2935{
2936 int io_index;
2937 uint8_t *ptr;
2938 unsigned long pd;
2939 PhysPageDesc *p;
2940
2941 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2942 if (!p) {
2943 pd = IO_MEM_UNASSIGNED;
2944 } else {
2945 pd = p->phys_offset;
2946 }
3b46e624 2947
3a7d929e 2948 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2950 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2951 } else {
5fafdf24 2952 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2953 (addr & ~TARGET_PAGE_MASK);
2954 stl_p(ptr, val);
2955 }
2956}
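/* [Editorial sketch, not part of exec.c] The use case named in the comment
   above: a target MMU helper setting accessed/dirty bits in a guest PTE.
   Going through stl_phys_notdirty() keeps the page's dirty byte unchanged,
   so when the dirty bitmap is itself used to track modified PTEs these
   internal updates do not look like guest writes.  The bit values follow
   the x86 convention but are illustrative here. */
static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                  /* "accessed" bit (x86-style, assumed) */
    if (is_write)
        pte |= 0x40;              /* "dirty" bit (x86-style, assumed) */
    stl_phys_notdirty(pte_addr, pte);
}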
2957
bc98a7ef
JM
2958void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2959{
2960 int io_index;
2961 uint8_t *ptr;
2962 unsigned long pd;
2963 PhysPageDesc *p;
2964
2965 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2966 if (!p) {
2967 pd = IO_MEM_UNASSIGNED;
2968 } else {
2969 pd = p->phys_offset;
2970 }
3b46e624 2971
bc98a7ef
JM
2972 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2973 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2974#ifdef TARGET_WORDS_BIGENDIAN
2975 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2976 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2977#else
2978 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2979 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2980#endif
2981 } else {
5fafdf24 2982 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2983 (addr & ~TARGET_PAGE_MASK);
2984 stq_p(ptr, val);
2985 }
2986}
2987
8df1cd07 2988/* warning: addr must be aligned */
8df1cd07
FB
2989void stl_phys(target_phys_addr_t addr, uint32_t val)
2990{
2991 int io_index;
2992 uint8_t *ptr;
2993 unsigned long pd;
2994 PhysPageDesc *p;
2995
2996 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2997 if (!p) {
2998 pd = IO_MEM_UNASSIGNED;
2999 } else {
3000 pd = p->phys_offset;
3001 }
3b46e624 3002
3a7d929e 3003 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3004 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3005 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3006 } else {
3007 unsigned long addr1;
3008 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3009 /* RAM case */
3010 ptr = phys_ram_base + addr1;
3011 stl_p(ptr, val);
3a7d929e
FB
3012 if (!cpu_physical_memory_is_dirty(addr1)) {
3013 /* invalidate code */
3014 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3015 /* set dirty bit */
f23db169
FB
3016 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3017 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3018 }
8df1cd07
FB
3019 }
3020}
3021
aab33094
FB
3022/* XXX: optimize */
3023void stb_phys(target_phys_addr_t addr, uint32_t val)
3024{
3025 uint8_t v = val;
3026 cpu_physical_memory_write(addr, &v, 1);
3027}
3028
3029/* XXX: optimize */
3030void stw_phys(target_phys_addr_t addr, uint32_t val)
3031{
3032 uint16_t v = tswap16(val);
3033 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3034}
3035
3036/* XXX: optimize */
3037void stq_phys(target_phys_addr_t addr, uint64_t val)
3038{
3039 val = tswap64(val);
3040 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3041}
3042
13eb76e0
FB
3043#endif
3044
3045/* virtual memory access for debug */
5fafdf24 3046int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3047 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3048{
3049 int l;
9b3c35e0
JM
3050 target_phys_addr_t phys_addr;
3051 target_ulong page;
13eb76e0
FB
3052
3053 while (len > 0) {
3054 page = addr & TARGET_PAGE_MASK;
3055 phys_addr = cpu_get_phys_page_debug(env, page);
3056 /* if no physical page mapped, return an error */
3057 if (phys_addr == -1)
3058 return -1;
3059 l = (page + TARGET_PAGE_SIZE) - addr;
3060 if (l > len)
3061 l = len;
5fafdf24 3062 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3063 buf, l, is_write);
13eb76e0
FB
3064 len -= l;
3065 buf += l;
3066 addr += l;
3067 }
3068 return 0;
3069}
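/* [Editorial sketch, not part of exec.c] This is the accessor the gdb stub
   uses to service memory packets: it resolves each page through
   cpu_get_phys_page_debug() rather than the TLB, so it works even for
   addresses the guest has never touched.  Illustrative wrapper only. */
static int example_read_guest_bytes(CPUState *env, target_ulong addr,
                                    uint8_t *buf, int len)
{
    /* returns -1 if some page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, addr, buf, len, 0);
}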
3070
e3db7226
FB
3071void dump_exec_info(FILE *f,
3072 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3073{
3074 int i, target_code_size, max_target_code_size;
3075 int direct_jmp_count, direct_jmp2_count, cross_page;
3076 TranslationBlock *tb;
3b46e624 3077
e3db7226
FB
3078 target_code_size = 0;
3079 max_target_code_size = 0;
3080 cross_page = 0;
3081 direct_jmp_count = 0;
3082 direct_jmp2_count = 0;
3083 for(i = 0; i < nb_tbs; i++) {
3084 tb = &tbs[i];
3085 target_code_size += tb->size;
3086 if (tb->size > max_target_code_size)
3087 max_target_code_size = tb->size;
3088 if (tb->page_addr[1] != -1)
3089 cross_page++;
3090 if (tb->tb_next_offset[0] != 0xffff) {
3091 direct_jmp_count++;
3092 if (tb->tb_next_offset[1] != 0xffff) {
3093 direct_jmp2_count++;
3094 }
3095 }
3096 }
3097 /* XXX: avoid using doubles ? */
57fec1fe 3098 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3099 cpu_fprintf(f, "gen code size %ld/%ld\n",
3100 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3101 cpu_fprintf(f, "TB count %d/%d\n",
3102 nb_tbs, code_gen_max_blocks);
5fafdf24 3103 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3104 nb_tbs ? target_code_size / nb_tbs : 0,
3105 max_target_code_size);
5fafdf24 3106 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3107 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3108 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3109 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3110 cross_page,
e3db7226
FB
3111 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3112 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3113 direct_jmp_count,
e3db7226
FB
3114 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3115 direct_jmp2_count,
3116 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3117 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3118 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3119 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3120 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3121 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3122}
3123
5fafdf24 3124#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3125
3126#define MMUSUFFIX _cmmu
3127#define GETPC() NULL
3128#define env cpu_single_env
b769d8fe 3129#define SOFTMMU_CODE_ACCESS
61382a50
FB
3130
3131#define SHIFT 0
3132#include "softmmu_template.h"
3133
3134#define SHIFT 1
3135#include "softmmu_template.h"
3136
3137#define SHIFT 2
3138#include "softmmu_template.h"
3139
3140#define SHIFT 3
3141#include "softmmu_template.h"
3142
3143#undef env
3144
3145#endif