/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
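
/* A page index is looked up in two steps (this is what page_find()
   and phys_page_find() below do):

       index = addr >> TARGET_PAGE_BITS;
       p = l1_map[index >> L2_BITS];
       pd = p + (index & (L2_SIZE - 1));

   i.e. the top L1_BITS of the page number select a second-level table
   and the low L2_BITS select an entry inside it.  With
   TARGET_PAGE_BITS == 12 and a 32-bit address space this splits an
   address into 10 (L1) + 10 (L2) + 12 (page offset) bits.  (Sketch
   for illustration only; see the real helpers below.) */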

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
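
/* A subpage_t lets a single target page be shared among several I/O
   handlers: mem_read/mem_write are indexed by the offset of the access
   within the page (see SUBPAGE_IDX) and by the same access-size index
   used for io_mem_read/io_mem_write. */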

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
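
/* Note: both map_exec() variants leave the range readable, writable
   and executable; the POSIX variant first rounds the range out to host
   page boundaries because mprotect() operates on whole pages. */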

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
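
/* Note: code_gen_buffer_max_size keeps one maximum-sized block of
   headroom below the end of the buffer, so a translation that is
   already in progress can always be completed; tb_alloc() refuses new
   TBs once code_gen_ptr crosses this threshold. */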

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

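/* Translated code is bump-allocated out of code_gen_buffer (via
   code_gen_ptr), so individual TBs are never freed: when the buffer or
   the tbs[] array fills up, everything is discarded at once by
   tb_flush() and translation starts over. */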
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

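/* TB list pointers carry a tag in their low 2 bits: in page_next[] and
   the jump lists, (long)tb & 3 selects which of the two per-TB slots
   (0 or 1) the link belongs to, and the value 2 marks the head of the
   circular jump list (see tb->jmp_first = tb | 2 in tb_link_phys()).
   The helpers below mask the tag off with ~3 before dereferencing. */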
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

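/* The code bitmap has one bit per byte of the page, set for every byte
   covered by a translated block.  It lets tb_invalidate_phys_page_fast()
   decide cheaply whether a small write can touch translated code at
   all; it is only built once a page has accumulated
   SMC_BITMAP_USE_THRESHOLD write lookups (see code_write_count). */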
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

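/* Precise self-modifying code handling: if a store modifies the very
   TB that executes it, the invalidation code below restores the CPU
   state to that instruction, regenerates a block containing just that
   single instruction (CF_SINGLE_INSN) and resumes execution there, so
   the current TB can be invalidated safely. */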
/* invalidate all TBs which intersect with the target physical page
   starting in the range [start, end).  NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

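/* Dirty RAM tracking: when the dirty flags of a page are reset, every
   TLB write entry mapping it is switched from IO_MEM_RAM to
   IO_MEM_NOTDIRTY, so that the next store takes the slow path, sets
   the dirty bits again and re-enables direct writes.  The per-CPU
   loops below re-arm that trap. */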
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

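/* Each CPUTLBEntry caches one guest page per MMU mode.  The addr_read /
   addr_write / addr_code fields hold the guest virtual page address,
   possibly combined with an I/O index or flag in the low bits (any
   mismatch there forces the slow path).  For a RAM page, 'addend' is
   chosen so that

       host_address = guest_vaddr + addend

   which is why tlb_set_page_exec() below subtracts vaddr from the host
   base address of the page before storing it. */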
59817ccb
FB
1750/* add a new TLB entry. At most one entry for a given virtual address
1751 is permitted. Return 0 if OK or 2 if the page could not be mapped
1752 (can only happen in non SOFTMMU mode for I/O pages or pages
1753 conflicting with the host address space). */
5fafdf24
TS
1754int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1755 target_phys_addr_t paddr, int prot,
6ebbf390 1756 int mmu_idx, int is_softmmu)
9fa3e853 1757{
92e873b9 1758 PhysPageDesc *p;
4f2ac237 1759 unsigned long pd;
9fa3e853 1760 unsigned int index;
4f2ac237 1761 target_ulong address;
108c49b8 1762 target_phys_addr_t addend;
9fa3e853 1763 int ret;
84b7b8e7 1764 CPUTLBEntry *te;
6658ffb8 1765 int i;
9fa3e853 1766
92e873b9 1767 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1768 if (!p) {
1769 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1770 } else {
1771 pd = p->phys_offset;
9fa3e853
FB
1772 }
1773#if defined(DEBUG_TLB)
6ebbf390
JM
1774 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1775 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1776#endif
1777
1778 ret = 0;
1779#if !defined(CONFIG_SOFTMMU)
5fafdf24 1780 if (is_softmmu)
9fa3e853
FB
1781#endif
1782 {
2a4188a3 1783 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1784 /* IO memory case */
1785 address = vaddr | pd;
1786 addend = paddr;
1787 } else {
1788 /* standard memory */
1789 address = vaddr;
1790 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1791 }
6658ffb8
PB
1792
1793 /* Make accesses to pages with watchpoints go via the
1794 watchpoint trap routines. */
1795 for (i = 0; i < env->nb_watchpoints; i++) {
1796 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1797 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1798 env->watchpoint[i].addend = 0;
6658ffb8
PB
1799 address = vaddr | io_mem_watch;
1800 } else {
d79acba4
AZ
1801 env->watchpoint[i].addend = pd - paddr +
1802 (unsigned long) phys_ram_base;
6658ffb8
PB
1803 /* TODO: Figure out how to make read watchpoints coexist
1804 with code. */
1805 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1806 }
1807 }
1808 }
d79acba4 1809
90f18422 1810 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1811 addend -= vaddr;
6ebbf390 1812 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1813 te->addend = addend;
67b915a5 1814 if (prot & PAGE_READ) {
84b7b8e7
FB
1815 te->addr_read = address;
1816 } else {
1817 te->addr_read = -1;
1818 }
5c751e99 1819
84b7b8e7
FB
1820 if (prot & PAGE_EXEC) {
1821 te->addr_code = address;
9fa3e853 1822 } else {
84b7b8e7 1823 te->addr_code = -1;
9fa3e853 1824 }
67b915a5 1825 if (prot & PAGE_WRITE) {
5fafdf24 1826 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1827 (pd & IO_MEM_ROMD)) {
1828 /* write access calls the I/O callback */
5fafdf24 1829 te->addr_write = vaddr |
856074ec 1830 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1831 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1832 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1833 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1834 } else {
84b7b8e7 1835 te->addr_write = address;
9fa3e853
FB
1836 }
1837 } else {
84b7b8e7 1838 te->addr_write = -1;
9fa3e853
FB
1839 }
1840 }
1841#if !defined(CONFIG_SOFTMMU)
1842 else {
1843 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1844 /* IO access: no mapping is done as it will be handled by the
1845 soft MMU */
1846 if (!(env->hflags & HF_SOFTMMU_MASK))
1847 ret = 2;
1848 } else {
1849 void *map_addr;
59817ccb
FB
1850
1851 if (vaddr >= MMAP_AREA_END) {
1852 ret = 2;
1853 } else {
1854 if (prot & PROT_WRITE) {
5fafdf24 1855 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1856#if defined(TARGET_HAS_SMC) || 1
59817ccb 1857 first_tb ||
d720b93d 1858#endif
5fafdf24 1859 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1860 !cpu_physical_memory_is_dirty(pd))) {
1861 /* ROM: we act as if code were inside */
1862 /* if code is present, we map the page read-only and save the
1863 original mapping */
1864 VirtPageDesc *vp;
3b46e624 1865
90f18422 1866 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1867 vp->phys_addr = pd;
1868 vp->prot = prot;
1869 vp->valid_tag = virt_valid_tag;
1870 prot &= ~PAGE_WRITE;
1871 }
1872 }
5fafdf24 1873 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1874 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1875 if (map_addr == MAP_FAILED) {
1876 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1877 paddr, vaddr);
9fa3e853 1878 }
9fa3e853
FB
1879 }
1880 }
1881 }
1882#endif
1883 return ret;
1884}
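
/* Editor's example (not from the original source): a minimal sketch of
   how a target's MMU fault handler of this era might install a mapping
   with tlb_set_page_exec(); the get_phys_page() page-walk helper and
   its semantics are hypothetical placeholders. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong address,
                                    int is_write, int mmu_idx,
                                    int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* hypothetical target-specific page table walk */
    if (get_phys_page(env, address, is_write, &paddr, &prot) < 0)
        return 1;   /* tell the caller to raise a guest page fault */
    return tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, is_softmmu);
}
#endif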
1885
1886/* called from signal handler: invalidate the code and unprotect the
1887 page. Return TRUE if the fault was successfully handled. */
53a5960a 1888int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1889{
1890#if !defined(CONFIG_SOFTMMU)
1891 VirtPageDesc *vp;
1892
1893#if defined(DEBUG_TLB)
1894 printf("page_unprotect: addr=0x%08x\n", addr);
1895#endif
1896 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1897
1898 /* if it is not mapped, no need to worry here */
1899 if (addr >= MMAP_AREA_END)
1900 return 0;
9fa3e853
FB
1901 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1902 if (!vp)
1903 return 0;
1904 /* NOTE: in this case, validate_tag is _not_ tested as it
1905 validates only the code TLB */
1906 if (vp->valid_tag != virt_valid_tag)
1907 return 0;
1908 if (!(vp->prot & PAGE_WRITE))
1909 return 0;
1910#if defined(DEBUG_TLB)
5fafdf24 1911 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1912 addr, vp->phys_addr, vp->prot);
1913#endif
59817ccb
FB
1914 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1915 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1916 (unsigned long)addr, vp->prot);
d720b93d 1917 /* set the dirty bit */
0a962c02 1918 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1919 /* flush the code inside */
1920 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1921 return 1;
1922#else
1923 return 0;
1924#endif
33417e70
FB
1925}
1926
0124311e
FB
1927#else
1928
ee8b7021 1929void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1930{
1931}
1932
2e12669a 1933void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1934{
1935}
1936
5fafdf24
TS
1937int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1938 target_phys_addr_t paddr, int prot,
6ebbf390 1939 int mmu_idx, int is_softmmu)
9fa3e853
FB
1940{
1941 return 0;
1942}
0124311e 1943
9fa3e853
FB
1944/* dump memory mappings */
1945void page_dump(FILE *f)
33417e70 1946{
9fa3e853
FB
1947 unsigned long start, end;
1948 int i, j, prot, prot1;
1949 PageDesc *p;
33417e70 1950
9fa3e853
FB
1951 fprintf(f, "%-8s %-8s %-8s %s\n",
1952 "start", "end", "size", "prot");
1953 start = -1;
1954 end = -1;
1955 prot = 0;
1956 for(i = 0; i <= L1_SIZE; i++) {
1957 if (i < L1_SIZE)
1958 p = l1_map[i];
1959 else
1960 p = NULL;
1961 for(j = 0;j < L2_SIZE; j++) {
1962 if (!p)
1963 prot1 = 0;
1964 else
1965 prot1 = p[j].flags;
1966 if (prot1 != prot) {
1967 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1968 if (start != -1) {
1969 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1970 start, end, end - start,
9fa3e853
FB
1971 prot & PAGE_READ ? 'r' : '-',
1972 prot & PAGE_WRITE ? 'w' : '-',
1973 prot & PAGE_EXEC ? 'x' : '-');
1974 }
1975 if (prot1 != 0)
1976 start = end;
1977 else
1978 start = -1;
1979 prot = prot1;
1980 }
1981 if (!p)
1982 break;
1983 }
33417e70 1984 }
33417e70
FB
1985}
1986
53a5960a 1987int page_get_flags(target_ulong address)
33417e70 1988{
9fa3e853
FB
1989 PageDesc *p;
1990
1991 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1992 if (!p)
9fa3e853
FB
1993 return 0;
1994 return p->flags;
1995}
1996
1997/* modify the flags of a page and invalidate the code if
1998 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1999 depending on PAGE_WRITE */
53a5960a 2000void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2001{
2002 PageDesc *p;
53a5960a 2003 target_ulong addr;
9fa3e853
FB
2004
2005 start = start & TARGET_PAGE_MASK;
2006 end = TARGET_PAGE_ALIGN(end);
2007 if (flags & PAGE_WRITE)
2008 flags |= PAGE_WRITE_ORG;
2009 spin_lock(&tb_lock);
2010 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2011 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2012 /* if a write-protected page is being made writable, we
2013 invalidate any translated code it contains */
5fafdf24 2014 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2015 (flags & PAGE_WRITE) &&
2016 p->first_tb) {
d720b93d 2017 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2018 }
2019 p->flags = flags;
2020 }
2021 spin_unlock(&tb_lock);
33417e70
FB
2022}
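
/* Editor's example (not from the original source): how user-mode mmap
   emulation might record a freshly created anonymous mapping; the
   start address and length here are arbitrary. */
#if 0
    target_ulong start = 0x40000000, len = 0x2000;
    /* ... host mmap() has already succeeded for this range ... */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
#endif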
2023
3d97b40b
TS
2024int page_check_range(target_ulong start, target_ulong len, int flags)
2025{
2026 PageDesc *p;
2027 target_ulong end;
2028 target_ulong addr;
2029
2030 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2031 start = start & TARGET_PAGE_MASK;
2032
2033 if( end < start )
2034 /* we've wrapped around */
2035 return -1;
2036 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2037 p = page_find(addr >> TARGET_PAGE_BITS);
2038 if( !p )
2039 return -1;
2040 if( !(p->flags & PAGE_VALID) )
2041 return -1;
2042
dae3270c 2043 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2044 return -1;
dae3270c
FB
2045 if (flags & PAGE_WRITE) {
2046 if (!(p->flags & PAGE_WRITE_ORG))
2047 return -1;
2048 /* unprotect the page if it was put read-only because it
2049 contains translated code */
2050 if (!(p->flags & PAGE_WRITE)) {
2051 if (!page_unprotect(addr, 0, NULL))
2052 return -1;
2053 }
2054 return 0;
2055 }
3d97b40b
TS
2056 }
2057 return 0;
2058}
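
/* Editor's example (not from the original source): a syscall emulation
   layer might validate a guest buffer with this before touching it;
   TARGET_EFAULT is assumed to be defined by the target's headers. */
#if 0
    if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
        return -TARGET_EFAULT;
#endif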
2059
9fa3e853
FB
2060/* called from signal handler: invalidate the code and unprotect the
2061 page. Return TRUE if the fault was successfully handled. */
53a5960a 2062int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2063{
2064 unsigned int page_index, prot, pindex;
2065 PageDesc *p, *p1;
53a5960a 2066 target_ulong host_start, host_end, addr;
9fa3e853 2067
83fb7adf 2068 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2069 page_index = host_start >> TARGET_PAGE_BITS;
2070 p1 = page_find(page_index);
2071 if (!p1)
2072 return 0;
83fb7adf 2073 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2074 p = p1;
2075 prot = 0;
2076 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2077 prot |= p->flags;
2078 p++;
2079 }
2080 /* if the page was really writable, then we change its
2081 protection back to writable */
2082 if (prot & PAGE_WRITE_ORG) {
2083 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2084 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2085 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2086 (prot & PAGE_BITS) | PAGE_WRITE);
2087 p1[pindex].flags |= PAGE_WRITE;
2088 /* and since the content will be modified, we must invalidate
2089 the corresponding translated code. */
d720b93d 2090 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2091#ifdef DEBUG_TB_CHECK
2092 tb_invalidate_check(address);
2093#endif
2094 return 1;
2095 }
2096 }
2097 return 0;
2098}
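
/* Editor's sketch (not from the original source) of the signal-handler
   path that reaches page_unprotect(); in QEMU of this era the
   surrounding logic lives in cpu-exec.c, and the code below is
   abridged and illustrative only. */
#if 0
    /* inside the host SIGSEGV handler, where 'address' is the faulting
       guest address and 'pc' the host pc of the faulting write: */
    if (page_unprotect(address, pc, puc)) {
        /* the page only looked read-only because it holds translated
           code; it is writable again, so restart the faulting insn */
        return 1;
    }
#endif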
2099
6a00d601
FB
2100static inline void tlb_set_dirty(CPUState *env,
2101 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2102{
2103}
9fa3e853
FB
2104#endif /* defined(CONFIG_USER_ONLY) */
2105
db7b5426 2106static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2107 ram_addr_t memory);
2108static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2109 ram_addr_t orig_memory);
db7b5426
BS
2110#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2111 need_subpage) \
2112 do { \
2113 if (addr > start_addr) \
2114 start_addr2 = 0; \
2115 else { \
2116 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2117 if (start_addr2 > 0) \
2118 need_subpage = 1; \
2119 } \
2120 \
49e9fba2 2121 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2122 end_addr2 = TARGET_PAGE_SIZE - 1; \
2123 else { \
2124 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2125 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2126 need_subpage = 1; \
2127 } \
2128 } while (0)
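
/* Worked example (editor's note, not in the original source): with
   4 KiB target pages, registering orig_size = 0x100 bytes at
   start_addr = 0x1004 gives, for the first page (addr == start_addr):
   start_addr2 = 0x1004 & ~TARGET_PAGE_MASK = 0x004 and
   end_addr2 = (0x1004 + 0x100 - 1) & ~TARGET_PAGE_MASK = 0x103.
   Both boundaries fall strictly inside the page, so need_subpage is
   set and the range is routed through a subpage_t below. */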
2129
33417e70
FB
2130/* register physical memory. 'size' must be a multiple of the target
2131 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2132 I/O memory page */
5fafdf24 2133void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2134 ram_addr_t size,
2135 ram_addr_t phys_offset)
33417e70 2136{
108c49b8 2137 target_phys_addr_t addr, end_addr;
92e873b9 2138 PhysPageDesc *p;
9d42037b 2139 CPUState *env;
00f82b8a 2140 ram_addr_t orig_size = size;
db7b5426 2141 void *subpage;
33417e70 2142
da260249
FB
2143#ifdef USE_KQEMU
2144 /* XXX: should not depend on cpu context */
2145 env = first_cpu;
2146 if (env->kqemu_enabled) {
2147 kqemu_set_phys_mem(start_addr, size, phys_offset);
2148 }
2149#endif
5fd386f6 2150 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2151 end_addr = start_addr + (target_phys_addr_t)size;
2152 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2153 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2154 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2155 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2156 target_phys_addr_t start_addr2, end_addr2;
2157 int need_subpage = 0;
2158
2159 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2160 need_subpage);
4254fab8 2161 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2162 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2163 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2164 &p->phys_offset, orig_memory);
2165 } else {
2166 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2167 >> IO_MEM_SHIFT];
2168 }
2169 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2170 } else {
2171 p->phys_offset = phys_offset;
2172 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2173 (phys_offset & IO_MEM_ROMD))
2174 phys_offset += TARGET_PAGE_SIZE;
2175 }
2176 } else {
2177 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2178 p->phys_offset = phys_offset;
2179 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2180 (phys_offset & IO_MEM_ROMD))
2181 phys_offset += TARGET_PAGE_SIZE;
2182 else {
2183 target_phys_addr_t start_addr2, end_addr2;
2184 int need_subpage = 0;
2185
2186 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2187 end_addr2, need_subpage);
2188
4254fab8 2189 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2190 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2191 &p->phys_offset, IO_MEM_UNASSIGNED);
2192 subpage_register(subpage, start_addr2, end_addr2,
2193 phys_offset);
2194 }
2195 }
2196 }
33417e70 2197 }
3b46e624 2198
9d42037b
FB
2199 /* since each CPU stores ram addresses in its TLB cache, we must
2200 reset the modified entries */
2201 /* XXX: slow ! */
2202 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2203 tlb_flush(env, 1);
2204 }
33417e70
FB
2205}
2206
ba863458 2207/* XXX: temporary until new memory mapping API */
00f82b8a 2208ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2209{
2210 PhysPageDesc *p;
2211
2212 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2213 if (!p)
2214 return IO_MEM_UNASSIGNED;
2215 return p->phys_offset;
2216}
2217
e9a1ab19 2218/* XXX: better than nothing */
00f82b8a 2219ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2220{
2221 ram_addr_t addr;
7fb4fdcf 2222 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2223 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2224 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2225 abort();
2226 }
2227 addr = phys_ram_alloc_offset;
2228 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2229 return addr;
2230}
2231
2232void qemu_ram_free(ram_addr_t addr)
2233{
2234}
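
/* Editor's example (not from the original source): a board init
   function of this era would typically pair qemu_ram_alloc() with
   cpu_register_physical_memory(); the base address and RAM size here
   are arbitrary. */
#if 0
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
#endif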
2235
a4193c8a 2236static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2237{
67d3b957 2238#ifdef DEBUG_UNASSIGNED
ab3d1727 2239 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2240#endif
2241#ifdef TARGET_SPARC
6c36d3fa 2242 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2243#elif TARGET_CRIS
2244 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2245#endif
33417e70
FB
2246 return 0;
2247}
2248
a4193c8a 2249static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2250{
67d3b957 2251#ifdef DEBUG_UNASSIGNED
ab3d1727 2252 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2253#endif
b4f0a316 2254#ifdef TARGET_SPARC
6c36d3fa 2255 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2256#elif TARGET_CRIS
2257 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2258#endif
33417e70
FB
2259}
2260
2261static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2262 unassigned_mem_readb,
2263 unassigned_mem_readb,
2264 unassigned_mem_readb,
2265};
2266
2267static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2268 unassigned_mem_writeb,
2269 unassigned_mem_writeb,
2270 unassigned_mem_writeb,
2271};
2272
3a7d929e 2273static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2274{
3a7d929e
FB
2275 unsigned long ram_addr;
2276 int dirty_flags;
2277 ram_addr = addr - (unsigned long)phys_ram_base;
2278 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2279 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2280#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2281 tb_invalidate_phys_page_fast(ram_addr, 1);
2282 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2283#endif
3a7d929e 2284 }
c27004ec 2285 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2286#ifdef USE_KQEMU
2287 if (cpu_single_env->kqemu_enabled &&
2288 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2289 kqemu_modify_page(cpu_single_env, ram_addr);
2290#endif
f23db169
FB
2291 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2292 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2293 /* we remove the notdirty callback only if the code has been
2294 flushed */
2295 if (dirty_flags == 0xff)
6a00d601 2296 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2297}
2298
3a7d929e 2299static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2300{
3a7d929e
FB
2301 unsigned long ram_addr;
2302 int dirty_flags;
2303 ram_addr = addr - (unsigned long)phys_ram_base;
2304 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2305 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2306#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2307 tb_invalidate_phys_page_fast(ram_addr, 2);
2308 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2309#endif
3a7d929e 2310 }
c27004ec 2311 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2312#ifdef USE_KQEMU
2313 if (cpu_single_env->kqemu_enabled &&
2314 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2315 kqemu_modify_page(cpu_single_env, ram_addr);
2316#endif
f23db169
FB
2317 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2318 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2319 /* we remove the notdirty callback only if the code has been
2320 flushed */
2321 if (dirty_flags == 0xff)
6a00d601 2322 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2323}
2324
3a7d929e 2325static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2326{
3a7d929e
FB
2327 unsigned long ram_addr;
2328 int dirty_flags;
2329 ram_addr = addr - (unsigned long)phys_ram_base;
2330 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2331 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2332#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2333 tb_invalidate_phys_page_fast(ram_addr, 4);
2334 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2335#endif
3a7d929e 2336 }
c27004ec 2337 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2338#ifdef USE_KQEMU
2339 if (cpu_single_env->kqemu_enabled &&
2340 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2341 kqemu_modify_page(cpu_single_env, ram_addr);
2342#endif
f23db169
FB
2343 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2344 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2345 /* we remove the notdirty callback only if the code has been
2346 flushed */
2347 if (dirty_flags == 0xff)
6a00d601 2348 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2349}
2350
3a7d929e 2351static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2352 NULL, /* never used */
2353 NULL, /* never used */
2354 NULL, /* never used */
2355};
2356
1ccde1cb
FB
2357static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2358 notdirty_mem_writeb,
2359 notdirty_mem_writew,
2360 notdirty_mem_writel,
2361};
2362
6658ffb8
PB
2363#if defined(CONFIG_SOFTMMU)
2364/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2365 so these check for a hit then pass through to the normal out-of-line
2366 phys routines. */
2367static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2368{
2369 return ldub_phys(addr);
2370}
2371
2372static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2373{
2374 return lduw_phys(addr);
2375}
2376
2377static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2378{
2379 return ldl_phys(addr);
2380}
2381
2382/* Generate a debug exception if a watchpoint has been hit.
2383 Returns the real physical address of the access. addr will be a host
d79acba4 2384 address in case of a RAM location. */
6658ffb8
PB
2385static target_ulong check_watchpoint(target_phys_addr_t addr)
2386{
2387 CPUState *env = cpu_single_env;
2388 target_ulong watch;
2389 target_ulong retaddr;
2390 int i;
2391
2392 retaddr = addr;
2393 for (i = 0; i < env->nb_watchpoints; i++) {
2394 watch = env->watchpoint[i].vaddr;
2395 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2396 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2397 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2398 cpu_single_env->watchpoint_hit = i + 1;
2399 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2400 break;
2401 }
2402 }
2403 }
2404 return retaddr;
2405}
2406
2407static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2408 uint32_t val)
2409{
2410 addr = check_watchpoint(addr);
2411 stb_phys(addr, val);
2412}
2413
2414static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2415 uint32_t val)
2416{
2417 addr = check_watchpoint(addr);
2418 stw_phys(addr, val);
2419}
2420
2421static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2422 uint32_t val)
2423{
2424 addr = check_watchpoint(addr);
2425 stl_phys(addr, val);
2426}
2427
2428static CPUReadMemoryFunc *watch_mem_read[3] = {
2429 watch_mem_readb,
2430 watch_mem_readw,
2431 watch_mem_readl,
2432};
2433
2434static CPUWriteMemoryFunc *watch_mem_write[3] = {
2435 watch_mem_writeb,
2436 watch_mem_writew,
2437 watch_mem_writel,
2438};
2439#endif
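
/* Editor's note (not from the original source): these handlers fire
   because tlb_set_page_exec() above redirects accesses to a watched
   page through io_mem_watch. The watchpoint itself is armed elsewhere;
   a sketch, assuming the cpu_watchpoint_insert() helper of this era: */
#if 0
    cpu_watchpoint_insert(env, vaddr);   /* hypothetical call site */
#endif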
2440
db7b5426
BS
2441static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2442 unsigned int len)
2443{
db7b5426
BS
2444 uint32_t ret;
2445 unsigned int idx;
2446
2447 idx = SUBPAGE_IDX(addr - mmio->base);
2448#if defined(DEBUG_SUBPAGE)
2449 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2450 mmio, len, addr, idx);
2451#endif
3ee89922 2452 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2453
2454 return ret;
2455}
2456
2457static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2458 uint32_t value, unsigned int len)
2459{
db7b5426
BS
2460 unsigned int idx;
2461
2462 idx = SUBPAGE_IDX(addr - mmio->base);
2463#if defined(DEBUG_SUBPAGE)
2464 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2465 mmio, len, addr, idx, value);
2466#endif
3ee89922 2467 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2468}
2469
2470static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2471{
2472#if defined(DEBUG_SUBPAGE)
2473 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2474#endif
2475
2476 return subpage_readlen(opaque, addr, 0);
2477}
2478
2479static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2480 uint32_t value)
2481{
2482#if defined(DEBUG_SUBPAGE)
2483 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2484#endif
2485 subpage_writelen(opaque, addr, value, 0);
2486}
2487
2488static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2489{
2490#if defined(DEBUG_SUBPAGE)
2491 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2492#endif
2493
2494 return subpage_readlen(opaque, addr, 1);
2495}
2496
2497static void subpage_writew (void *opaque, target_phys_addr_t addr,
2498 uint32_t value)
2499{
2500#if defined(DEBUG_SUBPAGE)
2501 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2502#endif
2503 subpage_writelen(opaque, addr, value, 1);
2504}
2505
2506static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2507{
2508#if defined(DEBUG_SUBPAGE)
2509 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2510#endif
2511
2512 return subpage_readlen(opaque, addr, 2);
2513}
2514
2515static void subpage_writel (void *opaque,
2516 target_phys_addr_t addr, uint32_t value)
2517{
2518#if defined(DEBUG_SUBPAGE)
2519 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2520#endif
2521 subpage_writelen(opaque, addr, value, 2);
2522}
2523
2524static CPUReadMemoryFunc *subpage_read[] = {
2525 &subpage_readb,
2526 &subpage_readw,
2527 &subpage_readl,
2528};
2529
2530static CPUWriteMemoryFunc *subpage_write[] = {
2531 &subpage_writeb,
2532 &subpage_writew,
2533 &subpage_writel,
2534};
2535
2536static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2537 ram_addr_t memory)
db7b5426
BS
2538{
2539 int idx, eidx;
4254fab8 2540 unsigned int i;
db7b5426
BS
2541
2542 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2543 return -1;
2544 idx = SUBPAGE_IDX(start);
2545 eidx = SUBPAGE_IDX(end);
2546#if defined(DEBUG_SUBPAGE)
2547 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2548 mmio, start, end, idx, eidx, memory);
2549#endif
2550 memory >>= IO_MEM_SHIFT;
2551 for (; idx <= eidx; idx++) {
4254fab8 2552 for (i = 0; i < 4; i++) {
3ee89922
BS
2553 if (io_mem_read[memory][i]) {
2554 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2555 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2556 }
2557 if (io_mem_write[memory][i]) {
2558 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2559 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2560 }
4254fab8 2561 }
db7b5426
BS
2562 }
2563
2564 return 0;
2565}
2566
00f82b8a
AJ
2567static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2568 ram_addr_t orig_memory)
db7b5426
BS
2569{
2570 subpage_t *mmio;
2571 int subpage_memory;
2572
2573 mmio = qemu_mallocz(sizeof(subpage_t));
2574 if (mmio != NULL) {
2575 mmio->base = base;
2576 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2577#if defined(DEBUG_SUBPAGE)
2578 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2579 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2580#endif
2581 *phys = subpage_memory | IO_MEM_SUBPAGE;
2582 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2583 }
2584
2585 return mmio;
2586}
2587
33417e70
FB
2588static void io_mem_init(void)
2589{
3a7d929e 2590 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2591 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2592 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2593 io_mem_nb = 5;
2594
6658ffb8
PB
2595#if defined(CONFIG_SOFTMMU)
2596 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2597 watch_mem_write, NULL);
2598#endif
1ccde1cb 2599 /* alloc dirty bits array */
0a962c02 2600 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2601 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2602}
2603
2604/* mem_read and mem_write are arrays of functions containing the
2605 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2606 2). Functions can be omitted with a NULL function pointer. The
2607 registered functions may be modified dynamically later.
2608 If io_index is non-zero, the corresponding I/O zone is
4254fab8
BS
2609 modified. If it is zero, a new I/O zone is allocated. The return
2610 value can be used with cpu_register_physical_memory(). (-1) is
2611 returned on error. */
33417e70
FB
2612int cpu_register_io_memory(int io_index,
2613 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2614 CPUWriteMemoryFunc **mem_write,
2615 void *opaque)
33417e70 2616{
4254fab8 2617 int i, subwidth = 0;
33417e70
FB
2618
2619 if (io_index <= 0) {
b5ff1b31 2620 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2621 return -1;
2622 io_index = io_mem_nb++;
2623 } else {
2624 if (io_index >= IO_MEM_NB_ENTRIES)
2625 return -1;
2626 }
b5ff1b31 2627
33417e70 2628 for(i = 0;i < 3; i++) {
4254fab8
BS
2629 if (!mem_read[i] || !mem_write[i])
2630 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2631 io_mem_read[io_index][i] = mem_read[i];
2632 io_mem_write[io_index][i] = mem_write[i];
2633 }
a4193c8a 2634 io_mem_opaque[io_index] = opaque;
4254fab8 2635 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2636}
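
/* Editor's example (not from the original source): the usual device
   registration pattern of this era, mirroring the io_mem_init() calls
   above; the my_dev_* callbacks, the opaque state 's' and MY_DEV_BASE
   are hypothetical. */
#if 0
    static CPUReadMemoryFunc *my_dev_read[3] = {
        my_dev_readb, my_dev_readw, my_dev_readl,
    };
    static CPUWriteMemoryFunc *my_dev_write[3] = {
        my_dev_writeb, my_dev_writew, my_dev_writel,
    };

    int iomemtype = cpu_register_io_memory(0, my_dev_read,
                                           my_dev_write, s);
    cpu_register_physical_memory(MY_DEV_BASE, 0x1000, iomemtype);
#endif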
61382a50 2637
8926b517
FB
2638CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2639{
2640 return io_mem_write[io_index >> IO_MEM_SHIFT];
2641}
2642
2643CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2644{
2645 return io_mem_read[io_index >> IO_MEM_SHIFT];
2646}
2647
13eb76e0
FB
2648/* physical memory access (slow version, mainly for debug) */
2649#if defined(CONFIG_USER_ONLY)
5fafdf24 2650void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2651 int len, int is_write)
2652{
2653 int l, flags;
2654 target_ulong page;
53a5960a 2655 void * p;
13eb76e0
FB
2656
2657 while (len > 0) {
2658 page = addr & TARGET_PAGE_MASK;
2659 l = (page + TARGET_PAGE_SIZE) - addr;
2660 if (l > len)
2661 l = len;
2662 flags = page_get_flags(page);
2663 if (!(flags & PAGE_VALID))
2664 return;
2665 if (is_write) {
2666 if (!(flags & PAGE_WRITE))
2667 return;
579a97f7 2668 /* XXX: this code should not depend on lock_user */
72fb7daa 2669 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2670 /* FIXME - should this return an error rather than just fail? */
2671 return;
72fb7daa
AJ
2672 memcpy(p, buf, l);
2673 unlock_user(p, addr, l);
13eb76e0
FB
2674 } else {
2675 if (!(flags & PAGE_READ))
2676 return;
579a97f7 2677 /* XXX: this code should not depend on lock_user */
72fb7daa 2678 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2679 /* FIXME - should this return an error rather than just fail? */
2680 return;
72fb7daa 2681 memcpy(buf, p, l);
5b257578 2682 unlock_user(p, addr, 0);
13eb76e0
FB
2683 }
2684 len -= l;
2685 buf += l;
2686 addr += l;
2687 }
2688}
8df1cd07 2689
13eb76e0 2690#else
5fafdf24 2691void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2692 int len, int is_write)
2693{
2694 int l, io_index;
2695 uint8_t *ptr;
2696 uint32_t val;
2e12669a
FB
2697 target_phys_addr_t page;
2698 unsigned long pd;
92e873b9 2699 PhysPageDesc *p;
3b46e624 2700
13eb76e0
FB
2701 while (len > 0) {
2702 page = addr & TARGET_PAGE_MASK;
2703 l = (page + TARGET_PAGE_SIZE) - addr;
2704 if (l > len)
2705 l = len;
92e873b9 2706 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2707 if (!p) {
2708 pd = IO_MEM_UNASSIGNED;
2709 } else {
2710 pd = p->phys_offset;
2711 }
3b46e624 2712
13eb76e0 2713 if (is_write) {
3a7d929e 2714 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2715 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2716 /* XXX: could force cpu_single_env to NULL to avoid
2717 potential bugs */
13eb76e0 2718 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2719 /* 32 bit write access */
c27004ec 2720 val = ldl_p(buf);
a4193c8a 2721 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2722 l = 4;
2723 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2724 /* 16 bit write access */
c27004ec 2725 val = lduw_p(buf);
a4193c8a 2726 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2727 l = 2;
2728 } else {
1c213d19 2729 /* 8 bit write access */
c27004ec 2730 val = ldub_p(buf);
a4193c8a 2731 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2732 l = 1;
2733 }
2734 } else {
b448f2f3
FB
2735 unsigned long addr1;
2736 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2737 /* RAM case */
b448f2f3 2738 ptr = phys_ram_base + addr1;
13eb76e0 2739 memcpy(ptr, buf, l);
3a7d929e
FB
2740 if (!cpu_physical_memory_is_dirty(addr1)) {
2741 /* invalidate code */
2742 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2743 /* set dirty bit */
5fafdf24 2744 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2745 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2746 }
13eb76e0
FB
2747 }
2748 } else {
5fafdf24 2749 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2750 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2751 /* I/O case */
2752 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2753 if (l >= 4 && ((addr & 3) == 0)) {
2754 /* 32 bit read access */
a4193c8a 2755 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2756 stl_p(buf, val);
13eb76e0
FB
2757 l = 4;
2758 } else if (l >= 2 && ((addr & 1) == 0)) {
2759 /* 16 bit read access */
a4193c8a 2760 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2761 stw_p(buf, val);
13eb76e0
FB
2762 l = 2;
2763 } else {
1c213d19 2764 /* 8 bit read access */
a4193c8a 2765 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2766 stb_p(buf, val);
13eb76e0
FB
2767 l = 1;
2768 }
2769 } else {
2770 /* RAM case */
5fafdf24 2771 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2772 (addr & ~TARGET_PAGE_MASK);
2773 memcpy(buf, ptr, l);
2774 }
2775 }
2776 len -= l;
2777 buf += l;
2778 addr += l;
2779 }
2780}
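
/* Editor's example (not from the original source): device models use
   the cpu_physical_memory_read()/write() wrappers around this function
   for DMA; dma_addr and sector_buf are assumed device state. */
#if 0
    uint8_t sector_buf[512];
    /* device -> guest RAM */
    cpu_physical_memory_write(dma_addr, sector_buf, sizeof(sector_buf));
    /* guest RAM -> device */
    cpu_physical_memory_read(dma_addr, sector_buf, sizeof(sector_buf));
#endif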
8df1cd07 2781
d0ecd2aa 2782/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2783void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2784 const uint8_t *buf, int len)
2785{
2786 int l;
2787 uint8_t *ptr;
2788 target_phys_addr_t page;
2789 unsigned long pd;
2790 PhysPageDesc *p;
3b46e624 2791
d0ecd2aa
FB
2792 while (len > 0) {
2793 page = addr & TARGET_PAGE_MASK;
2794 l = (page + TARGET_PAGE_SIZE) - addr;
2795 if (l > len)
2796 l = len;
2797 p = phys_page_find(page >> TARGET_PAGE_BITS);
2798 if (!p) {
2799 pd = IO_MEM_UNASSIGNED;
2800 } else {
2801 pd = p->phys_offset;
2802 }
3b46e624 2803
d0ecd2aa 2804 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2805 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2806 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2807 /* do nothing */
2808 } else {
2809 unsigned long addr1;
2810 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2811 /* ROM/RAM case */
2812 ptr = phys_ram_base + addr1;
2813 memcpy(ptr, buf, l);
2814 }
2815 len -= l;
2816 buf += l;
2817 addr += l;
2818 }
2819}
2820
2821
8df1cd07
FB
2822/* warning: addr must be aligned */
2823uint32_t ldl_phys(target_phys_addr_t addr)
2824{
2825 int io_index;
2826 uint8_t *ptr;
2827 uint32_t val;
2828 unsigned long pd;
2829 PhysPageDesc *p;
2830
2831 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2832 if (!p) {
2833 pd = IO_MEM_UNASSIGNED;
2834 } else {
2835 pd = p->phys_offset;
2836 }
3b46e624 2837
5fafdf24 2838 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2839 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2840 /* I/O case */
2841 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2842 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2843 } else {
2844 /* RAM case */
5fafdf24 2845 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2846 (addr & ~TARGET_PAGE_MASK);
2847 val = ldl_p(ptr);
2848 }
2849 return val;
2850}
2851
84b7b8e7
FB
2852/* warning: addr must be aligned */
2853uint64_t ldq_phys(target_phys_addr_t addr)
2854{
2855 int io_index;
2856 uint8_t *ptr;
2857 uint64_t val;
2858 unsigned long pd;
2859 PhysPageDesc *p;
2860
2861 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2862 if (!p) {
2863 pd = IO_MEM_UNASSIGNED;
2864 } else {
2865 pd = p->phys_offset;
2866 }
3b46e624 2867
2a4188a3
FB
2868 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2869 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2870 /* I/O case */
2871 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2872#ifdef TARGET_WORDS_BIGENDIAN
2873 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2874 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2875#else
2876 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2877 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2878#endif
2879 } else {
2880 /* RAM case */
5fafdf24 2881 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2882 (addr & ~TARGET_PAGE_MASK);
2883 val = ldq_p(ptr);
2884 }
2885 return val;
2886}
2887
aab33094
FB
2888/* XXX: optimize */
2889uint32_t ldub_phys(target_phys_addr_t addr)
2890{
2891 uint8_t val;
2892 cpu_physical_memory_read(addr, &val, 1);
2893 return val;
2894}
2895
2896/* XXX: optimize */
2897uint32_t lduw_phys(target_phys_addr_t addr)
2898{
2899 uint16_t val;
2900 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2901 return tswap16(val);
2902}
2903
8df1cd07
FB
2904/* warning: addr must be aligned. The ram page is not masked as dirty
2905 and the code inside is not invalidated. It is useful if the dirty
2906 bits are used to track modified PTEs */
2907void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2908{
2909 int io_index;
2910 uint8_t *ptr;
2911 unsigned long pd;
2912 PhysPageDesc *p;
2913
2914 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2915 if (!p) {
2916 pd = IO_MEM_UNASSIGNED;
2917 } else {
2918 pd = p->phys_offset;
2919 }
3b46e624 2920
3a7d929e 2921 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2922 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2923 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2924 } else {
5fafdf24 2925 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2926 (addr & ~TARGET_PAGE_MASK);
2927 stl_p(ptr, val);
2928 }
2929}
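
/* Editor's example (not from the original source): the typical caller
   is a target MMU walker updating the accessed/dirty bits of a guest
   PTE, along the lines of target-i386; pte_addr and the PG_* masks are
   assumed to come from the target. */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    pte |= PG_ACCESSED_MASK | PG_DIRTY_MASK;
    stl_phys_notdirty(pte_addr, pte);   /* no dirty flag, no TB flush */
#endif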
2930
bc98a7ef
JM
2931void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2932{
2933 int io_index;
2934 uint8_t *ptr;
2935 unsigned long pd;
2936 PhysPageDesc *p;
2937
2938 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2939 if (!p) {
2940 pd = IO_MEM_UNASSIGNED;
2941 } else {
2942 pd = p->phys_offset;
2943 }
3b46e624 2944
bc98a7ef
JM
2945 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2946 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2947#ifdef TARGET_WORDS_BIGENDIAN
2948 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2949 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2950#else
2951 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2952 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2953#endif
2954 } else {
5fafdf24 2955 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2956 (addr & ~TARGET_PAGE_MASK);
2957 stq_p(ptr, val);
2958 }
2959}
2960
8df1cd07 2961/* warning: addr must be aligned */
8df1cd07
FB
2962void stl_phys(target_phys_addr_t addr, uint32_t val)
2963{
2964 int io_index;
2965 uint8_t *ptr;
2966 unsigned long pd;
2967 PhysPageDesc *p;
2968
2969 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2970 if (!p) {
2971 pd = IO_MEM_UNASSIGNED;
2972 } else {
2973 pd = p->phys_offset;
2974 }
3b46e624 2975
3a7d929e 2976 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2977 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2978 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2979 } else {
2980 unsigned long addr1;
2981 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2982 /* RAM case */
2983 ptr = phys_ram_base + addr1;
2984 stl_p(ptr, val);
3a7d929e
FB
2985 if (!cpu_physical_memory_is_dirty(addr1)) {
2986 /* invalidate code */
2987 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2988 /* set dirty bit */
f23db169
FB
2989 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2990 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2991 }
8df1cd07
FB
2992 }
2993}
2994
aab33094
FB
2995/* XXX: optimize */
2996void stb_phys(target_phys_addr_t addr, uint32_t val)
2997{
2998 uint8_t v = val;
2999 cpu_physical_memory_write(addr, &v, 1);
3000}
3001
3002/* XXX: optimize */
3003void stw_phys(target_phys_addr_t addr, uint32_t val)
3004{
3005 uint16_t v = tswap16(val);
3006 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3007}
3008
3009/* XXX: optimize */
3010void stq_phys(target_phys_addr_t addr, uint64_t val)
3011{
3012 val = tswap64(val);
3013 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3014}
3015
13eb76e0
FB
3016#endif
3017
3018/* virtual memory access for debug */
5fafdf24 3019int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3020 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3021{
3022 int l;
9b3c35e0
JM
3023 target_phys_addr_t phys_addr;
3024 target_ulong page;
13eb76e0
FB
3025
3026 while (len > 0) {
3027 page = addr & TARGET_PAGE_MASK;
3028 phys_addr = cpu_get_phys_page_debug(env, page);
3029 /* if no physical page mapped, return an error */
3030 if (phys_addr == -1)
3031 return -1;
3032 l = (page + TARGET_PAGE_SIZE) - addr;
3033 if (l > len)
3034 l = len;
5fafdf24 3035 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3036 buf, l, is_write);
13eb76e0
FB
3037 len -= l;
3038 buf += l;
3039 addr += l;
3040 }
3041 return 0;
3042}
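
/* Editor's example (not from the original source): this is the entry
   point a debugger stub would use to read guest virtual memory; the
   gdbstub of this era reports an error packet when it fails. */
#if 0
    uint8_t mem_buf[256];
    if (cpu_memory_rw_debug(env, addr, mem_buf, len, 0) != 0) {
        /* no physical page is mapped at 'addr' */
    }
#endif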
3043
e3db7226
FB
3044void dump_exec_info(FILE *f,
3045 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3046{
3047 int i, target_code_size, max_target_code_size;
3048 int direct_jmp_count, direct_jmp2_count, cross_page;
3049 TranslationBlock *tb;
3b46e624 3050
e3db7226
FB
3051 target_code_size = 0;
3052 max_target_code_size = 0;
3053 cross_page = 0;
3054 direct_jmp_count = 0;
3055 direct_jmp2_count = 0;
3056 for(i = 0; i < nb_tbs; i++) {
3057 tb = &tbs[i];
3058 target_code_size += tb->size;
3059 if (tb->size > max_target_code_size)
3060 max_target_code_size = tb->size;
3061 if (tb->page_addr[1] != -1)
3062 cross_page++;
3063 if (tb->tb_next_offset[0] != 0xffff) {
3064 direct_jmp_count++;
3065 if (tb->tb_next_offset[1] != 0xffff) {
3066 direct_jmp2_count++;
3067 }
3068 }
3069 }
3070 /* XXX: avoid using doubles ? */
57fec1fe 3071 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3072 cpu_fprintf(f, "gen code size %ld/%ld\n",
3073 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3074 cpu_fprintf(f, "TB count %d/%d\n",
3075 nb_tbs, code_gen_max_blocks);
5fafdf24 3076 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3077 nb_tbs ? target_code_size / nb_tbs : 0,
3078 max_target_code_size);
5fafdf24 3079 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3080 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3081 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3082 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3083 cross_page,
e3db7226
FB
3084 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3085 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3086 direct_jmp_count,
e3db7226
FB
3087 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3088 direct_jmp2_count,
3089 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3090 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3091 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3092 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3093 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3094 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3095}
3096
5fafdf24 3097#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3098
3099#define MMUSUFFIX _cmmu
3100#define GETPC() NULL
3101#define env cpu_single_env
b769d8fe 3102#define SOFTMMU_CODE_ACCESS
61382a50
FB
3103
3104#define SHIFT 0
3105#include "softmmu_template.h"
3106
3107#define SHIFT 1
3108#include "softmmu_template.h"
3109
3110#define SHIFT 2
3111#include "softmmu_template.h"
3112
3113#define SHIFT 3
3114#include "softmmu_template.h"
3115
3116#undef env
3117
3118#endif