/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
42#endif
54936004 43
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
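
/* A target virtual address is thus decomposed as
   [ L1 index | L2 index | page offset ] with L1_BITS, L2_BITS and
   TARGET_PAGE_BITS bits respectively. Worked example, assuming 4 KB
   target pages (TARGET_PAGE_BITS == 12, so L1_BITS == 10): for address
   0x12345678 the page index is 0x12345, l1_map[0x12345 >> 10] ==
   l1_map[0x48] holds the level-2 array, entry 0x345 of that array is
   the PageDesc, and 0x678 is the offset within the page. */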

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
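
/* mprotect() works on whole host pages, so the range is widened to
   page granularity: start is rounded down and end rounded up. E.g.
   with 4 KB pages, map_exec((void *)0x1234, 0x100) makes the single
   page 0x1000..0x1fff executable. */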

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
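
/* Note the asymmetry with page_find_alloc() above: the physical map
   grows an extra level when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32,
   and a fresh level-2 array is filled with IO_MEM_UNASSIGNED rather
   than zeroed, so a never-mapped physical page reads back as
   unassigned I/O instead of as RAM at offset 0. */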

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
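
/* MAP_32BIT on x86_64 hosts presumably keeps the generated code in
   the low 2 GB of the address space so that it stays within 32-bit
   relative branch range of itself and of the prologue. Setting
   code_gen_buffer_max_size to leave code_gen_max_block_size() bytes
   free at the end means a translation that starts while space remains
   can never run off the end of the buffer. */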

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
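
/* These lists pack a tag into the two low bits of each pointer
   (TranslationBlock is sufficiently aligned for this): in the
   page_next lists the tag is 0 or 1 and names which of the TB's two
   page slots the link lives in; in the jump lists a tag of 2 marks
   the list head, i.e. (long)tb | 2 terminates the circular list.
   Masking with ~3, as in the loop above, recovers the real pointer. */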

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
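
/* Example: set_bits(tab, 3, 7) sets bits 3..9: start and end fall in
   different bytes, so tab[0] is or-ed with 0xf8 (bits 3..7), no full
   0xff byte follows, and tab[1] is or-ed with 0x03 (bits 8..9). */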

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
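
/* A TB is now reachable two ways: via tb_phys_hash, keyed by the
   physical PC, for reuse when the same code is executed again, and
   via the per-page first_tb lists for invalidation when the page is
   written. jmp_first set to (long)tb | 2 is an empty circular list of
   incoming jumps. */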

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
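
/* The binary search relies on tbs[] and code_gen_buffer being filled
   in lockstep: tb_alloc() hands out tbs[] entries in the same order
   as their code is emitted, so tc_ptr values increase with the index.
   When the loop ends without an exact hit, tbs[m_max] is the last TB
   starting at or before tc_ptr, i.e. the one containing it. */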

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
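
/* Example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC; "all" selects every entry of
   cpu_log_items, and any unknown name in the list makes the whole
   call return 0. */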

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
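
/* The mask TARGET_PAGE_MASK | TLB_INVALID_MASK keeps the page number
   bits plus the invalid bit: a valid entry for this page compares
   equal to the page-aligned addr, while an entry already set to -1
   keeps its invalid bit and can never match, so it is left alone. */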

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
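
/* Dirty tracking works in two halves: phys_ram_dirty holds the
   authoritative per-page flag bytes, while any TLB write entry for a
   page whose flags were just cleared is downgraded to IO_MEM_NOTDIRTY
   above, so the next store to the page takes the slow path and can
   set the dirty bits again. */
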
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
1748
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

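/* Editor's note -- the 'addend' stored above is chosen so that the softmmu
   fast path can turn a guest virtual address into a host pointer with a
   single addition. Illustrative sketch (values hypothetical, not from this
   file):

       addend   = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr;
       host_ptr = (uint8_t *)(long)(guest_vaddr + te->addend);

   For I/O pages the low bits of addr_read/addr_write carry the io_mem index
   instead, which is why 'address = vaddr | pd' is used in that branch. */
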
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

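/* Editor's note -- illustrative use (user-mode emulation; the address is
   hypothetical, not from this file): a guest mmap() of one writable page
   would be recorded as

       page_set_flags(0x40000000, 0x40000000 + TARGET_PAGE_SIZE,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   PAGE_WRITE_ORG is added automatically so the page can later be
   write-protected while it holds translated code and still be restored
   by page_unprotect(). */
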
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* note: every page of the range must be checked, so do
               not return success until the loop has completed */
        }
    }
    return 0;
}

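/* Editor's note -- typical caller pattern (a sketch; the error path shown is
   hypothetical): system-call emulation validates a guest buffer before
   writing into it, e.g.

       if (page_check_range(guest_addr, size, PAGE_WRITE) < 0)
           return -TARGET_EFAULT;

   The PAGE_WRITE branch above also un-write-protects pages that were made
   read-only because they contain translated code. */
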
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

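/* Editor's note -- worked example of CHECK_SUBPAGE (assuming 4 KiB target
   pages): for a 0x100-byte region at start_addr = 0x10000c00, the loop in
   cpu_register_physical_memory() below sees addr == start_addr, so the
   macro yields

       start_addr2 = 0xc00, end_addr2 = 0xcff, need_subpage = 1

   i.e. the new handlers cover only offsets 0xc00..0xcff of that page, and
   the rest of the page keeps its original mapping via subpage_init(). */
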
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

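/* Editor's note -- illustrative board-level usage (sizes and addresses
   hypothetical, not from this file): carving 8 MiB out of the preallocated
   phys_ram region and mapping it at guest physical address 0 takes two
   calls:

       ram_addr_t off = qemu_ram_alloc(8 * 1024 * 1024);
       cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, off);

   qemu_ram_alloc() is a simple bump allocator over phys_ram_base, which is
   why qemu_ram_free() above is deliberately a no-op. */
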
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

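/* Editor's note -- on the dirty bytes used above (a sketch of the scheme,
   inferred from this file): phys_ram_dirty keeps one byte per target page,
   treated as a small bitmap of dirty flags where 0xff means fully dirty and
   CODE_DIRTY_FLAG tracks whether translated code derived from the page is
   still valid. That is why the stores above OR in '0xff & ~CODE_DIRTY_FLAG'
   and only drop the notdirty callback, via tlb_set_dirty(), once the whole
   byte reads back as 0xff. */
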
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

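/* Editor's note -- a sketch of the watchpoint mechanism above: a page
   containing a watchpoint is mapped through io_mem_watch by
   tlb_set_page_exec(), so every access to it lands in these handlers.
   check_watchpoint() undoes the per-watchpoint addend to recover the real
   address, and raises CPU_INTERRUPT_DEBUG only when the access falls on the
   watched address within the page; otherwise it simply forwards to the
   normal ld*_phys/st*_phys routines. */
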
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

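/* Editor's note -- illustrative device registration (the mydev_* callbacks
   and state pointer are hypothetical, not defined in this file):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   A NULL entry marks the region IO_MEM_SUBWIDTH, and the returned value
   already carries the IO_MEM_SHIFT encoding that
   cpu_register_physical_memory() expects. */
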
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

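/* Editor's note -- illustrative DMA-style use (the buffer and address are
   hypothetical): device models copy to and from guest physical memory
   through this one entry point, usually via the read/write wrappers around
   cpu_physical_memory_rw():

       uint8_t buf[512];
       cpu_physical_memory_read(dma_addr, buf, sizeof(buf));
       cpu_physical_memory_write(dma_addr, buf, sizeof(buf));

   As shown above, writes to RAM invalidate any translated code in the
   touched pages, while MMIO transfers are split into the widest aligned
   1/2/4-byte accesses. */
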
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

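/* Editor's note -- why a 'notdirty' store exists (per the comment above,
   a sketch; PG_ACCESSED_MASK is a hypothetical target-specific flag): when
   the target MMU emulation uses the physical dirty bitmap to detect guest
   page-table updates, its own PTE rewrites, e.g.

       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);

   must not flag the page dirty, otherwise the emulator would mistake its
   bookkeeping store for a guest modification. */
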
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

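/* Editor's note -- illustrative caller (a sketch of the debugger use case;
   'env->pc' is hypothetical, each target names its program counter
   differently): reading guest virtual memory from a debug stub goes through
   the CPU's page-table walker rather than the TLB:

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, env->pc, insn, sizeof(insn), 0) < 0)
           ...the address is unmapped...
*/
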
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif