54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
42#endif
54936004 43
fd6ce8f6 44//#define DEBUG_TB_INVALIDATE
66e85a21 45//#define DEBUG_FLUSH
9fa3e853 46//#define DEBUG_TLB
67d3b957 47//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
48
49/* make various TB consistency checks */
5fafdf24
TS
50//#define DEBUG_TB_CHECK
51//#define DEBUG_TLB_CHECK
fd6ce8f6 52
1196be37 53//#define DEBUG_IOPORT
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
99773bd4
PB
56#if !defined(CONFIG_USER_ONLY)
57/* TB consistency checks only implemented for usermode emulation. */
58#undef DEBUG_TB_CHECK
59#endif
60
9fa3e853
FB
61#define SMC_BITMAP_USE_THRESHOLD 10
62
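/* Illustrative note: code_write_count is bumped in
   tb_invalidate_phys_page_range() on each CPU write to a page that holds
   translated code; once it reaches SMC_BITMAP_USE_THRESHOLD, a per-page
   code bitmap is built (see build_page_bitmap()) so that later short
   writes which do not overlap translated code can skip the costly TB
   invalidation in tb_invalidate_phys_page_fast(). */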
63#define MMAP_AREA_START 0x00000000
64#define MMAP_AREA_END 0xa8000000
fd6ce8f6 65
108c49b8
FB
66#if defined(TARGET_SPARC64)
67#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
68#elif defined(TARGET_SPARC)
69#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
70#elif defined(TARGET_ALPHA)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
73#elif defined(TARGET_PPC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
75#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76#define TARGET_PHYS_ADDR_SPACE_BITS 42
77#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
79#else
80/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81#define TARGET_PHYS_ADDR_SPACE_BITS 32
82#endif
83
fab94c0e 84TranslationBlock *tbs;
26a5f13b 85int code_gen_max_blocks;
9fa3e853 86TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 87int nb_tbs;
eb51d102
FB
88/* any access to the tbs or the page table must use this lock */
89spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 90
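/* Note on the two TB lookup structures handled in this file: tb_phys_hash
   is a global hash table keyed on the physical PC of each TB (see
   tb_link_phys() and tb_phys_invalidate()) and is what self-modifying-code
   invalidation walks; in addition each CPUState keeps a small tb_jmp_cache
   keyed on the virtual PC for fast lookup during execution, and that cache
   is simply cleared whenever TBs are invalidated or flushed. */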
7cb69cae 91uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
26a5f13b
FB
92uint8_t *code_gen_buffer;
93unsigned long code_gen_buffer_size;
94/* threshold to flush the translated code buffer */
95unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
96uint8_t *code_gen_ptr;
97
00f82b8a 98ram_addr_t phys_ram_size;
9fa3e853
FB
99int phys_ram_fd;
100uint8_t *phys_ram_base;
1ccde1cb 101uint8_t *phys_ram_dirty;
e9a1ab19 102static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 103
6a00d601
FB
104CPUState *first_cpu;
105/* current CPU in the current thread. It is only valid inside
106 cpu_exec() */
5fafdf24 107CPUState *cpu_single_env;
6a00d601 108
54936004 109typedef struct PageDesc {
92e873b9 110 /* list of TBs intersecting this ram page */
fd6ce8f6 111 TranslationBlock *first_tb;
9fa3e853
FB
112 /* in order to optimize self modifying code, we count the number
113 of lookups we do to a given page to use a bitmap */
114 unsigned int code_write_count;
115 uint8_t *code_bitmap;
116#if defined(CONFIG_USER_ONLY)
117 unsigned long flags;
118#endif
54936004
FB
119} PageDesc;
120
92e873b9
FB
121typedef struct PhysPageDesc {
122 /* offset in host memory of the page + io_index in the low 12 bits */
00f82b8a 123 ram_addr_t phys_offset;
92e873b9
FB
124} PhysPageDesc;
125
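/* Illustrative sketch: since pages are TARGET_PAGE_SIZE aligned, the low
   bits of phys_offset are free to carry the I/O index, so callers split it
   roughly as

       ram_addr_t pd = p->phys_offset;
       unsigned long io_index = pd & ~TARGET_PAGE_MASK;   (IO_MEM_RAM, IO_MEM_ROM, ...)
       ram_addr_t page_base   = pd & TARGET_PAGE_MASK;

   which is the pattern used for instance in tlb_set_page_exec() below. */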
54936004 126#define L2_BITS 10
bedb69ea
JM
127#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128/* XXX: this is a temporary hack for alpha target.
129 * In the future, this is to be replaced by a multi-level table
130 * to actually be able to handle the complete 64 bits address space.
131 */
132#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
133#else
03875444 134#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 135#endif
54936004
FB
136
137#define L1_SIZE (1 << L1_BITS)
138#define L2_SIZE (1 << L2_BITS)
139
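/* Illustrative example of the two-level lookup done by page_find(): for a
   target virtual address 'addr',

       target_ulong index = addr >> TARGET_PAGE_BITS;
       PageDesc *l2 = l1_map[index >> L2_BITS];                 (top level, L1_SIZE entries)
       PageDesc *pd = l2 ? l2 + (index & (L2_SIZE - 1)) : NULL; (second level, L2_SIZE entries)

   With L2_BITS == 10 and 4 KB target pages this splits a 32-bit address
   into a 10-bit L1 index, a 10-bit L2 index and a 12-bit page offset. */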
33417e70 140static void io_mem_init(void);
fd6ce8f6 141
83fb7adf
FB
142unsigned long qemu_real_host_page_size;
143unsigned long qemu_host_page_bits;
144unsigned long qemu_host_page_size;
145unsigned long qemu_host_page_mask;
54936004 146
92e873b9 147/* XXX: for system emulation, it could just be an array */
54936004 148static PageDesc *l1_map[L1_SIZE];
0a962c02 149PhysPageDesc **l1_phys_map;
54936004 150
33417e70 151/* io memory support */
33417e70
FB
152CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 154void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 155static int io_mem_nb;
6658ffb8
PB
156#if defined(CONFIG_SOFTMMU)
157static int io_mem_watch;
158#endif
33417e70 159
34865134
FB
160/* log support */
161char *logfilename = "/tmp/qemu.log";
162FILE *logfile;
163int loglevel;
e735b91c 164static int log_append = 0;
34865134 165
e3db7226
FB
166/* statistics */
167static int tlb_flush_count;
168static int tb_flush_count;
169static int tb_phys_invalidate_count;
170
db7b5426
BS
171#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
172typedef struct subpage_t {
173 target_phys_addr_t base;
3ee89922
BS
174 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
175 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
176 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
177} subpage_t;
178
7cb69cae
FB
179#ifdef _WIN32
180static void map_exec(void *addr, long size)
181{
182 DWORD old_protect;
183 VirtualProtect(addr, size,
184 PAGE_EXECUTE_READWRITE, &old_protect);
185
186}
187#else
188static void map_exec(void *addr, long size)
189{
190 unsigned long start, end;
191
192 start = (unsigned long)addr;
193 start &= ~(qemu_real_host_page_size - 1);
194
195 end = (unsigned long)addr + size;
196 end += qemu_real_host_page_size - 1;
197 end &= ~(qemu_real_host_page_size - 1);
198
199 mprotect((void *)start, end - start,
200 PROT_READ | PROT_WRITE | PROT_EXEC);
201}
202#endif
203
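/* Note: the Unix map_exec() above rounds the region out to host page
   boundaries before calling mprotect(), since protections can only be
   changed with host-page granularity; the Win32 variant relies on
   VirtualProtect() applying the change to every page the range touches. */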
b346ff46 204static void page_init(void)
54936004 205{
83fb7adf 206 /* NOTE: we can always assume that qemu_host_page_size >=
54936004 207 TARGET_PAGE_SIZE */
67b915a5 208#ifdef _WIN32
d5a8f07c
FB
209 {
210 SYSTEM_INFO system_info;
211 DWORD old_protect;
3b46e624 212
d5a8f07c
FB
213 GetSystemInfo(&system_info);
214 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 215 }
67b915a5 216#else
83fb7adf 217 qemu_real_host_page_size = getpagesize();
67b915a5 218#endif
83fb7adf
FB
219 if (qemu_host_page_size == 0)
220 qemu_host_page_size = qemu_real_host_page_size;
221 if (qemu_host_page_size < TARGET_PAGE_SIZE)
222 qemu_host_page_size = TARGET_PAGE_SIZE;
223 qemu_host_page_bits = 0;
224 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
225 qemu_host_page_bits++;
226 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
227 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
228 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
229
230#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
231 {
232 long long startaddr, endaddr;
233 FILE *f;
234 int n;
235
236 f = fopen("/proc/self/maps", "r");
237 if (f) {
238 do {
239 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
240 if (n == 2) {
e0b8d65a
BS
241 startaddr = MIN(startaddr,
242 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
243 endaddr = MIN(endaddr,
244 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
50a9569b
AZ
245 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
246 TARGET_PAGE_ALIGN(endaddr),
247 PAGE_RESERVED);
248 }
249 } while (!feof(f));
250 fclose(f);
251 }
252 }
253#endif
54936004
FB
254}
255
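/* Note: the /proc/self/maps scan above only matters for user-mode
   emulation: every region the host loader/libc has already mapped is
   flagged PAGE_RESERVED in the guest page table, so the emulated mmap()
   will not hand out addresses that collide with host mappings. */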
00f82b8a 256static inline PageDesc *page_find_alloc(target_ulong index)
54936004 257{
54936004
FB
258 PageDesc **lp, *p;
259
54936004
FB
260 lp = &l1_map[index >> L2_BITS];
261 p = *lp;
262 if (!p) {
263 /* allocate if not found */
59817ccb 264 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 265 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
266 *lp = p;
267 }
268 return p + (index & (L2_SIZE - 1));
269}
270
00f82b8a 271static inline PageDesc *page_find(target_ulong index)
54936004 272{
54936004
FB
273 PageDesc *p;
274
54936004
FB
275 p = l1_map[index >> L2_BITS];
276 if (!p)
277 return 0;
fd6ce8f6
FB
278 return p + (index & (L2_SIZE - 1));
279}
280
108c49b8 281static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 282{
108c49b8 283 void **lp, **p;
e3f4e2a4 284 PhysPageDesc *pd;
92e873b9 285
108c49b8
FB
286 p = (void **)l1_phys_map;
287#if TARGET_PHYS_ADDR_SPACE_BITS > 32
288
289#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
290#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
291#endif
292 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
293 p = *lp;
294 if (!p) {
295 /* allocate if not found */
108c49b8
FB
296 if (!alloc)
297 return NULL;
298 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
299 memset(p, 0, sizeof(void *) * L1_SIZE);
300 *lp = p;
301 }
302#endif
303 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
304 pd = *lp;
305 if (!pd) {
306 int i;
108c49b8
FB
307 /* allocate if not found */
308 if (!alloc)
309 return NULL;
e3f4e2a4
PB
310 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
311 *lp = pd;
312 for (i = 0; i < L2_SIZE; i++)
313 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 314 }
e3f4e2a4 315 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
316}
317
108c49b8 318static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 319{
108c49b8 320 return phys_page_find_alloc(index, 0);
92e873b9
FB
321}
322
9fa3e853 323#if !defined(CONFIG_USER_ONLY)
6a00d601 324static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 325static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 326 target_ulong vaddr);
9fa3e853 327#endif
fd6ce8f6 328
26a5f13b
FB
329void code_gen_alloc(unsigned long tb_size)
330{
331 code_gen_buffer_size = tb_size;
332 if (code_gen_buffer_size == 0) {
333 /* XXX: needs adjustments */
334 code_gen_buffer_size = (int)(phys_ram_size / 4);
335 }
336 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
337 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
338 /* The code gen buffer location may have constraints depending on
339 the host cpu and OS */
340#if defined(__linux__)
341 {
342 int flags;
343 flags = MAP_PRIVATE | MAP_ANONYMOUS;
344#if defined(__x86_64__)
345 flags |= MAP_32BIT;
346 /* Cannot map more than that */
347 if (code_gen_buffer_size > (800 * 1024 * 1024))
348 code_gen_buffer_size = (800 * 1024 * 1024);
349#endif
350 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
351 PROT_WRITE | PROT_READ | PROT_EXEC,
352 flags, -1, 0);
353 if (code_gen_buffer == MAP_FAILED) {
354 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
355 exit(1);
356 }
357 }
358#else
359 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
360 if (!code_gen_buffer) {
361 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
362 exit(1);
363 }
364 map_exec(code_gen_buffer, code_gen_buffer_size);
365#endif
366 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
367 code_gen_buffer_max_size = code_gen_buffer_size -
368 code_gen_max_block_size();
369 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
370 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
371}
372
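/* Note on the sizing above: code_gen_buffer_max_size is the buffer size
   minus the worst-case size of one translated block, so that a TB whose
   generation starts below the threshold can never overrun the buffer;
   code_gen_max_blocks only fixes how many TranslationBlock descriptors
   are preallocated (buffer size / CODE_GEN_AVG_BLOCK_SIZE).  tb_alloc()
   refuses new blocks when either limit is hit, which makes its caller
   flush the whole buffer. */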
373/* Must be called before using the QEMU cpus. 'tb_size' is the size
374 (in bytes) allocated to the translation buffer. Zero means default
375 size. */
376void cpu_exec_init_all(unsigned long tb_size)
377{
378 page_init();
379 cpu_gen_init();
380 code_gen_alloc(tb_size);
381 code_gen_ptr = code_gen_buffer;
382 io_mem_init();
383}
384
6a00d601 385void cpu_exec_init(CPUState *env)
fd6ce8f6 386{
6a00d601
FB
387 CPUState **penv;
388 int cpu_index;
389
6a00d601
FB
390 env->next_cpu = NULL;
391 penv = &first_cpu;
392 cpu_index = 0;
393 while (*penv != NULL) {
394 penv = (CPUState **)&(*penv)->next_cpu;
395 cpu_index++;
396 }
397 env->cpu_index = cpu_index;
6658ffb8 398 env->nb_watchpoints = 0;
6a00d601 399 *penv = env;
fd6ce8f6
FB
400}
401
9fa3e853
FB
402static inline void invalidate_page_bitmap(PageDesc *p)
403{
404 if (p->code_bitmap) {
59817ccb 405 qemu_free(p->code_bitmap);
9fa3e853
FB
406 p->code_bitmap = NULL;
407 }
408 p->code_write_count = 0;
409}
410
fd6ce8f6
FB
411/* set to NULL all the 'first_tb' fields in all PageDescs */
412static void page_flush_tb(void)
413{
414 int i, j;
415 PageDesc *p;
416
417 for(i = 0; i < L1_SIZE; i++) {
418 p = l1_map[i];
419 if (p) {
9fa3e853
FB
420 for(j = 0; j < L2_SIZE; j++) {
421 p->first_tb = NULL;
422 invalidate_page_bitmap(p);
423 p++;
424 }
fd6ce8f6
FB
425 }
426 }
427}
428
429/* flush all the translation blocks */
d4e8164f 430/* XXX: tb_flush is currently not thread safe */
6a00d601 431void tb_flush(CPUState *env1)
fd6ce8f6 432{
6a00d601 433 CPUState *env;
0124311e 434#if defined(DEBUG_FLUSH)
ab3d1727
BS
435 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
436 (unsigned long)(code_gen_ptr - code_gen_buffer),
437 nb_tbs, nb_tbs > 0 ?
438 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 439#endif
26a5f13b 440 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
441 cpu_abort(env1, "Internal error: code buffer overflow\n");
442
fd6ce8f6 443 nb_tbs = 0;
3b46e624 444
6a00d601
FB
445 for(env = first_cpu; env != NULL; env = env->next_cpu) {
446 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
447 }
9fa3e853 448
8a8a608f 449 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 450 page_flush_tb();
9fa3e853 451
fd6ce8f6 452 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
453 /* XXX: flush processor icache at this point if cache flush is
454 expensive */
e3db7226 455 tb_flush_count++;
fd6ce8f6
FB
456}
457
458#ifdef DEBUG_TB_CHECK
459
bc98a7ef 460static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
461{
462 TranslationBlock *tb;
463 int i;
464 address &= TARGET_PAGE_MASK;
99773bd4
PB
465 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
466 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
467 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
468 address >= tb->pc + tb->size)) {
469 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 470 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
471 }
472 }
473 }
474}
475
476/* verify that all the pages have correct rights for code */
477static void tb_page_check(void)
478{
479 TranslationBlock *tb;
480 int i, flags1, flags2;
3b46e624 481
99773bd4
PB
482 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
483 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
484 flags1 = page_get_flags(tb->pc);
485 flags2 = page_get_flags(tb->pc + tb->size - 1);
486 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
487 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 488 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
489 }
490 }
491 }
492}
493
d4e8164f
FB
494void tb_jmp_check(TranslationBlock *tb)
495{
496 TranslationBlock *tb1;
497 unsigned int n1;
498
499 /* suppress any remaining jumps to this TB */
500 tb1 = tb->jmp_first;
501 for(;;) {
502 n1 = (long)tb1 & 3;
503 tb1 = (TranslationBlock *)((long)tb1 & ~3);
504 if (n1 == 2)
505 break;
506 tb1 = tb1->jmp_next[n1];
507 }
508 /* check end of list */
509 if (tb1 != tb) {
510 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
511 }
512}
513
fd6ce8f6
FB
514#endif
515
516/* invalidate one TB */
517static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
518 int next_offset)
519{
520 TranslationBlock *tb1;
521 for(;;) {
522 tb1 = *ptb;
523 if (tb1 == tb) {
524 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
525 break;
526 }
527 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
528 }
529}
530
9fa3e853
FB
531static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
532{
533 TranslationBlock *tb1;
534 unsigned int n1;
535
536 for(;;) {
537 tb1 = *ptb;
538 n1 = (long)tb1 & 3;
539 tb1 = (TranslationBlock *)((long)tb1 & ~3);
540 if (tb1 == tb) {
541 *ptb = tb1->page_next[n1];
542 break;
543 }
544 ptb = &tb1->page_next[n1];
545 }
546}
547
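/* Note on the pointer tagging used by the TB lists handled here: the
   page_next[] entries and the jmp_first/jmp_next[] circular chains store a
   TranslationBlock pointer with a small tag packed into its two low bits
   (TB structures are sufficiently aligned for this).  Tags 0 and 1 select
   which of the two page slots resp. jump slots of the pointed-to TB the
   link lives in, and tag 2 is only used in the jump chains to mark the
   owning TB itself, i.e. the end of the circular list:

       n  = (long)tb & 3;                           extract the tag
       tb = (TranslationBlock *)((long)tb & ~3);    recover the pointer
*/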
d4e8164f
FB
548static inline void tb_jmp_remove(TranslationBlock *tb, int n)
549{
550 TranslationBlock *tb1, **ptb;
551 unsigned int n1;
552
553 ptb = &tb->jmp_next[n];
554 tb1 = *ptb;
555 if (tb1) {
556 /* find tb(n) in circular list */
557 for(;;) {
558 tb1 = *ptb;
559 n1 = (long)tb1 & 3;
560 tb1 = (TranslationBlock *)((long)tb1 & ~3);
561 if (n1 == n && tb1 == tb)
562 break;
563 if (n1 == 2) {
564 ptb = &tb1->jmp_first;
565 } else {
566 ptb = &tb1->jmp_next[n1];
567 }
568 }
569 /* now we can suppress tb(n) from the list */
570 *ptb = tb->jmp_next[n];
571
572 tb->jmp_next[n] = NULL;
573 }
574}
575
576/* reset the jump entry 'n' of a TB so that it is not chained to
577 another TB */
578static inline void tb_reset_jump(TranslationBlock *tb, int n)
579{
580 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
581}
582
00f82b8a 583static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 584{
6a00d601 585 CPUState *env;
8a40a180 586 PageDesc *p;
d4e8164f 587 unsigned int h, n1;
00f82b8a 588 target_phys_addr_t phys_pc;
8a40a180 589 TranslationBlock *tb1, *tb2;
3b46e624 590
8a40a180
FB
591 /* remove the TB from the hash list */
592 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
593 h = tb_phys_hash_func(phys_pc);
5fafdf24 594 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
595 offsetof(TranslationBlock, phys_hash_next));
596
597 /* remove the TB from the page list */
598 if (tb->page_addr[0] != page_addr) {
599 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
600 tb_page_remove(&p->first_tb, tb);
601 invalidate_page_bitmap(p);
602 }
603 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
604 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
605 tb_page_remove(&p->first_tb, tb);
606 invalidate_page_bitmap(p);
607 }
608
36bdbe54 609 tb_invalidated_flag = 1;
59817ccb 610
fd6ce8f6 611 /* remove the TB from the hash list */
8a40a180 612 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
613 for(env = first_cpu; env != NULL; env = env->next_cpu) {
614 if (env->tb_jmp_cache[h] == tb)
615 env->tb_jmp_cache[h] = NULL;
616 }
d4e8164f
FB
617
618 /* suppress this TB from the two jump lists */
619 tb_jmp_remove(tb, 0);
620 tb_jmp_remove(tb, 1);
621
622 /* suppress any remaining jumps to this TB */
623 tb1 = tb->jmp_first;
624 for(;;) {
625 n1 = (long)tb1 & 3;
626 if (n1 == 2)
627 break;
628 tb1 = (TranslationBlock *)((long)tb1 & ~3);
629 tb2 = tb1->jmp_next[n1];
630 tb_reset_jump(tb1, n1);
631 tb1->jmp_next[n1] = NULL;
632 tb1 = tb2;
633 }
634 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 635
e3db7226 636 tb_phys_invalidate_count++;
9fa3e853
FB
637}
638
639static inline void set_bits(uint8_t *tab, int start, int len)
640{
641 int end, mask, end1;
642
643 end = start + len;
644 tab += start >> 3;
645 mask = 0xff << (start & 7);
646 if ((start & ~7) == (end & ~7)) {
647 if (start < end) {
648 mask &= ~(0xff << (end & 7));
649 *tab |= mask;
650 }
651 } else {
652 *tab++ |= mask;
653 start = (start + 8) & ~7;
654 end1 = end & ~7;
655 while (start < end1) {
656 *tab++ = 0xff;
657 start += 8;
658 }
659 if (start < end) {
660 mask = ~(0xff << (end & 7));
661 *tab |= mask;
662 }
663 }
664}
665
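/* Worked example for set_bits(): set_bits(tab, 6, 4) marks bits [6, 10),
   i.e. it ORs 0xc0 into tab[0] (bits 6-7) and 0x03 into tab[1] (bits 8-9).
   build_page_bitmap() below uses it with one bit per byte of the page to
   record which bytes are covered by translated code. */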
666static void build_page_bitmap(PageDesc *p)
667{
668 int n, tb_start, tb_end;
669 TranslationBlock *tb;
3b46e624 670
59817ccb 671 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
672 if (!p->code_bitmap)
673 return;
674 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
675
676 tb = p->first_tb;
677 while (tb != NULL) {
678 n = (long)tb & 3;
679 tb = (TranslationBlock *)((long)tb & ~3);
680 /* NOTE: this is subtle as a TB may span two physical pages */
681 if (n == 0) {
682 /* NOTE: tb_end may be after the end of the page, but
683 it is not a problem */
684 tb_start = tb->pc & ~TARGET_PAGE_MASK;
685 tb_end = tb_start + tb->size;
686 if (tb_end > TARGET_PAGE_SIZE)
687 tb_end = TARGET_PAGE_SIZE;
688 } else {
689 tb_start = 0;
690 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
691 }
692 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
693 tb = tb->page_next[n];
694 }
695}
696
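/* Note: the bitmap built above is consumed by tb_invalidate_phys_page_fast():
   for a short write of 'len' bytes it checks the bits at the write offset
   and, if none of them is set, the write cannot touch translated code and
   the expensive tb_invalidate_phys_page_range() call is skipped entirely. */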
d720b93d
FB
697#ifdef TARGET_HAS_PRECISE_SMC
698
5fafdf24 699static void tb_gen_code(CPUState *env,
d720b93d
FB
700 target_ulong pc, target_ulong cs_base, int flags,
701 int cflags)
702{
703 TranslationBlock *tb;
704 uint8_t *tc_ptr;
705 target_ulong phys_pc, phys_page2, virt_page2;
706 int code_gen_size;
707
c27004ec
FB
708 phys_pc = get_phys_addr_code(env, pc);
709 tb = tb_alloc(pc);
d720b93d
FB
710 if (!tb) {
711 /* flush must be done */
712 tb_flush(env);
713 /* cannot fail at this point */
c27004ec 714 tb = tb_alloc(pc);
d720b93d
FB
715 }
716 tc_ptr = code_gen_ptr;
717 tb->tc_ptr = tc_ptr;
718 tb->cs_base = cs_base;
719 tb->flags = flags;
720 tb->cflags = cflags;
d07bde88 721 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 722 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 723
d720b93d 724 /* check next page if needed */
c27004ec 725 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 726 phys_page2 = -1;
c27004ec 727 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
728 phys_page2 = get_phys_addr_code(env, virt_page2);
729 }
730 tb_link_phys(tb, phys_pc, phys_page2);
731}
732#endif
3b46e624 733
9fa3e853
FB
734/* invalidate all TBs which intersect with the target physical page
735 starting in range [start, end). NOTE: start and end must refer to
d720b93d
FB
736 the same physical page. 'is_cpu_write_access' should be true if called
737 from a real cpu write access: the virtual CPU will exit the current
738 TB if code is modified inside this TB. */
00f82b8a 739void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
740 int is_cpu_write_access)
741{
742 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 743 CPUState *env = cpu_single_env;
9fa3e853 744 PageDesc *p;
ea1c1802 745 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 746 target_ulong tb_start, tb_end;
d720b93d 747 target_ulong current_pc, current_cs_base;
9fa3e853
FB
748
749 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 750 if (!p)
9fa3e853 751 return;
5fafdf24 752 if (!p->code_bitmap &&
d720b93d
FB
753 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
754 is_cpu_write_access) {
9fa3e853
FB
755 /* build code bitmap */
756 build_page_bitmap(p);
757 }
758
759 /* we remove all the TBs in the range [start, end) */
760 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
761 current_tb_not_found = is_cpu_write_access;
762 current_tb_modified = 0;
763 current_tb = NULL; /* avoid warning */
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
9fa3e853
FB
767 tb = p->first_tb;
768 while (tb != NULL) {
769 n = (long)tb & 3;
770 tb = (TranslationBlock *)((long)tb & ~3);
771 tb_next = tb->page_next[n];
772 /* NOTE: this is subtle as a TB may span two physical pages */
773 if (n == 0) {
774 /* NOTE: tb_end may be after the end of the page, but
775 it is not a problem */
776 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
777 tb_end = tb_start + tb->size;
778 } else {
779 tb_start = tb->page_addr[1];
780 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
781 }
782 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
783#ifdef TARGET_HAS_PRECISE_SMC
784 if (current_tb_not_found) {
785 current_tb_not_found = 0;
786 current_tb = NULL;
787 if (env->mem_write_pc) {
788 /* now we have a real cpu fault */
789 current_tb = tb_find_pc(env->mem_write_pc);
790 }
791 }
792 if (current_tb == tb &&
793 !(current_tb->cflags & CF_SINGLE_INSN)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
3b46e624 799
d720b93d 800 current_tb_modified = 1;
5fafdf24 801 cpu_restore_state(current_tb, env,
d720b93d
FB
802 env->mem_write_pc, NULL);
803#if defined(TARGET_I386)
804 current_flags = env->hflags;
805 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
806 current_cs_base = (target_ulong)env->segs[R_CS].base;
807 current_pc = current_cs_base + env->eip;
808#else
809#error unsupported CPU
810#endif
811 }
812#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
813 /* we need to do that to handle the case where a signal
814 occurs while doing tb_phys_invalidate() */
815 saved_tb = NULL;
816 if (env) {
817 saved_tb = env->current_tb;
818 env->current_tb = NULL;
819 }
9fa3e853 820 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
821 if (env) {
822 env->current_tb = saved_tb;
823 if (env->interrupt_request && env->current_tb)
824 cpu_interrupt(env, env->interrupt_request);
825 }
9fa3e853
FB
826 }
827 tb = tb_next;
828 }
829#if !defined(CONFIG_USER_ONLY)
830 /* if no code remaining, no need to continue to use slow writes */
831 if (!p->first_tb) {
832 invalidate_page_bitmap(p);
d720b93d
FB
833 if (is_cpu_write_access) {
834 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
835 }
836 }
837#endif
838#ifdef TARGET_HAS_PRECISE_SMC
839 if (current_tb_modified) {
840 /* we generate a block containing just the instruction
841 modifying the memory. It will ensure that it cannot modify
842 itself */
ea1c1802 843 env->current_tb = NULL;
5fafdf24 844 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
845 CF_SINGLE_INSN);
846 cpu_resume_from_signal(env, NULL);
9fa3e853 847 }
fd6ce8f6 848#endif
9fa3e853 849}
fd6ce8f6 850
9fa3e853 851/* len must be <= 8 and start must be a multiple of len */
00f82b8a 852static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
853{
854 PageDesc *p;
855 int offset, b;
59817ccb 856#if 0
a4193c8a
FB
857 if (1) {
858 if (loglevel) {
5fafdf24
TS
859 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
860 cpu_single_env->mem_write_vaddr, len,
861 cpu_single_env->eip,
a4193c8a
FB
862 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
863 }
59817ccb
FB
864 }
865#endif
9fa3e853 866 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 867 if (!p)
9fa3e853
FB
868 return;
869 if (p->code_bitmap) {
870 offset = start & ~TARGET_PAGE_MASK;
871 b = p->code_bitmap[offset >> 3] >> (offset & 7);
872 if (b & ((1 << len) - 1))
873 goto do_invalidate;
874 } else {
875 do_invalidate:
d720b93d 876 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
877 }
878}
879
9fa3e853 880#if !defined(CONFIG_SOFTMMU)
00f82b8a 881static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 882 unsigned long pc, void *puc)
9fa3e853 883{
d720b93d
FB
884 int n, current_flags, current_tb_modified;
885 target_ulong current_pc, current_cs_base;
9fa3e853 886 PageDesc *p;
d720b93d
FB
887 TranslationBlock *tb, *current_tb;
888#ifdef TARGET_HAS_PRECISE_SMC
889 CPUState *env = cpu_single_env;
890#endif
9fa3e853
FB
891
892 addr &= TARGET_PAGE_MASK;
893 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 894 if (!p)
9fa3e853
FB
895 return;
896 tb = p->first_tb;
d720b93d
FB
897 current_tb_modified = 0;
898 current_tb = NULL;
899 current_pc = 0; /* avoid warning */
900 current_cs_base = 0; /* avoid warning */
901 current_flags = 0; /* avoid warning */
902#ifdef TARGET_HAS_PRECISE_SMC
903 if (tb && pc != 0) {
904 current_tb = tb_find_pc(pc);
905 }
906#endif
9fa3e853
FB
907 while (tb != NULL) {
908 n = (long)tb & 3;
909 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
910#ifdef TARGET_HAS_PRECISE_SMC
911 if (current_tb == tb &&
912 !(current_tb->cflags & CF_SINGLE_INSN)) {
913 /* If we are modifying the current TB, we must stop
914 its execution. We could be more precise by checking
915 that the modification is after the current PC, but it
916 would require a specialized function to partially
917 restore the CPU state */
3b46e624 918
d720b93d
FB
919 current_tb_modified = 1;
920 cpu_restore_state(current_tb, env, pc, puc);
921#if defined(TARGET_I386)
922 current_flags = env->hflags;
923 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
924 current_cs_base = (target_ulong)env->segs[R_CS].base;
925 current_pc = current_cs_base + env->eip;
926#else
927#error unsupported CPU
928#endif
929 }
930#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
931 tb_phys_invalidate(tb, addr);
932 tb = tb->page_next[n];
933 }
fd6ce8f6 934 p->first_tb = NULL;
d720b93d
FB
935#ifdef TARGET_HAS_PRECISE_SMC
936 if (current_tb_modified) {
937 /* we generate a block containing just the instruction
938 modifying the memory. It will ensure that it cannot modify
939 itself */
ea1c1802 940 env->current_tb = NULL;
5fafdf24 941 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
942 CF_SINGLE_INSN);
943 cpu_resume_from_signal(env, puc);
944 }
945#endif
fd6ce8f6 946}
9fa3e853 947#endif
fd6ce8f6
FB
948
949/* add the tb in the target page and protect it if necessary */
5fafdf24 950static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 951 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
952{
953 PageDesc *p;
9fa3e853
FB
954 TranslationBlock *last_first_tb;
955
956 tb->page_addr[n] = page_addr;
3a7d929e 957 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
958 tb->page_next[n] = p->first_tb;
959 last_first_tb = p->first_tb;
960 p->first_tb = (TranslationBlock *)((long)tb | n);
961 invalidate_page_bitmap(p);
fd6ce8f6 962
107db443 963#if defined(TARGET_HAS_SMC) || 1
d720b93d 964
9fa3e853 965#if defined(CONFIG_USER_ONLY)
fd6ce8f6 966 if (p->flags & PAGE_WRITE) {
53a5960a
PB
967 target_ulong addr;
968 PageDesc *p2;
9fa3e853
FB
969 int prot;
970
fd6ce8f6
FB
971 /* force the host page as non writable (writes will have a
972 page fault + mprotect overhead) */
53a5960a 973 page_addr &= qemu_host_page_mask;
fd6ce8f6 974 prot = 0;
53a5960a
PB
975 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
976 addr += TARGET_PAGE_SIZE) {
977
978 p2 = page_find (addr >> TARGET_PAGE_BITS);
979 if (!p2)
980 continue;
981 prot |= p2->flags;
982 p2->flags &= ~PAGE_WRITE;
983 page_get_flags(addr);
984 }
5fafdf24 985 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
986 (prot & PAGE_BITS) & ~PAGE_WRITE);
987#ifdef DEBUG_TB_INVALIDATE
ab3d1727 988 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 989 page_addr);
fd6ce8f6 990#endif
fd6ce8f6 991 }
9fa3e853
FB
992#else
993 /* if some code is already present, then the pages are already
994 protected. So we handle the case where only the first TB is
995 allocated in a physical page */
996 if (!last_first_tb) {
6a00d601 997 tlb_protect_code(page_addr);
9fa3e853
FB
998 }
999#endif
d720b93d
FB
1000
1001#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1002}
1003
1004/* Allocate a new translation block. Return NULL, so that the caller
 1005 flushes the translation buffer, if there are too many translation blocks or too much generated code. */
c27004ec 1006TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1007{
1008 TranslationBlock *tb;
fd6ce8f6 1009
26a5f13b
FB
1010 if (nb_tbs >= code_gen_max_blocks ||
1011 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1012 return NULL;
fd6ce8f6
FB
1013 tb = &tbs[nb_tbs++];
1014 tb->pc = pc;
b448f2f3 1015 tb->cflags = 0;
d4e8164f
FB
1016 return tb;
1017}
1018
9fa3e853
FB
1019/* add a new TB and link it to the physical page tables. phys_page2 is
1020 (-1) to indicate that only one page contains the TB. */
5fafdf24 1021void tb_link_phys(TranslationBlock *tb,
9fa3e853 1022 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1023{
9fa3e853
FB
1024 unsigned int h;
1025 TranslationBlock **ptb;
1026
1027 /* add in the physical hash table */
1028 h = tb_phys_hash_func(phys_pc);
1029 ptb = &tb_phys_hash[h];
1030 tb->phys_hash_next = *ptb;
1031 *ptb = tb;
fd6ce8f6
FB
1032
1033 /* add in the page list */
9fa3e853
FB
1034 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1035 if (phys_page2 != -1)
1036 tb_alloc_page(tb, 1, phys_page2);
1037 else
1038 tb->page_addr[1] = -1;
9fa3e853 1039
d4e8164f
FB
1040 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1041 tb->jmp_next[0] = NULL;
1042 tb->jmp_next[1] = NULL;
1043
1044 /* init original jump addresses */
1045 if (tb->tb_next_offset[0] != 0xffff)
1046 tb_reset_jump(tb, 0);
1047 if (tb->tb_next_offset[1] != 0xffff)
1048 tb_reset_jump(tb, 1);
8a40a180
FB
1049
1050#ifdef DEBUG_TB_CHECK
1051 tb_page_check();
1052#endif
fd6ce8f6
FB
1053}
1054
9fa3e853
FB
1055/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1056 tb[1].tc_ptr. Return NULL if not found */
1057TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1058{
9fa3e853
FB
1059 int m_min, m_max, m;
1060 unsigned long v;
1061 TranslationBlock *tb;
a513fe19
FB
1062
1063 if (nb_tbs <= 0)
1064 return NULL;
1065 if (tc_ptr < (unsigned long)code_gen_buffer ||
1066 tc_ptr >= (unsigned long)code_gen_ptr)
1067 return NULL;
1068 /* binary search (cf Knuth) */
1069 m_min = 0;
1070 m_max = nb_tbs - 1;
1071 while (m_min <= m_max) {
1072 m = (m_min + m_max) >> 1;
1073 tb = &tbs[m];
1074 v = (unsigned long)tb->tc_ptr;
1075 if (v == tc_ptr)
1076 return tb;
1077 else if (tc_ptr < v) {
1078 m_max = m - 1;
1079 } else {
1080 m_min = m + 1;
1081 }
5fafdf24 1082 }
a513fe19
FB
1083 return &tbs[m_max];
1084}
7501267e 1085
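/* Note: the binary search above is valid because tbs[] entries are
   allocated in the same order as their generated code (tb_alloc() hands
   out descriptors sequentially while code_gen_ptr only grows), so tc_ptr
   is monotonically increasing across the array until the next tb_flush(). */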
ea041c0e
FB
1086static void tb_reset_jump_recursive(TranslationBlock *tb);
1087
1088static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1089{
1090 TranslationBlock *tb1, *tb_next, **ptb;
1091 unsigned int n1;
1092
1093 tb1 = tb->jmp_next[n];
1094 if (tb1 != NULL) {
1095 /* find head of list */
1096 for(;;) {
1097 n1 = (long)tb1 & 3;
1098 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1099 if (n1 == 2)
1100 break;
1101 tb1 = tb1->jmp_next[n1];
1102 }
1103 /* we are now sure that tb jumps to tb1 */
1104 tb_next = tb1;
1105
1106 /* remove tb from the jmp_first list */
1107 ptb = &tb_next->jmp_first;
1108 for(;;) {
1109 tb1 = *ptb;
1110 n1 = (long)tb1 & 3;
1111 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1112 if (n1 == n && tb1 == tb)
1113 break;
1114 ptb = &tb1->jmp_next[n1];
1115 }
1116 *ptb = tb->jmp_next[n];
1117 tb->jmp_next[n] = NULL;
3b46e624 1118
ea041c0e
FB
1119 /* suppress the jump to next tb in generated code */
1120 tb_reset_jump(tb, n);
1121
0124311e 1122 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1123 tb_reset_jump_recursive(tb_next);
1124 }
1125}
1126
1127static void tb_reset_jump_recursive(TranslationBlock *tb)
1128{
1129 tb_reset_jump_recursive2(tb, 0);
1130 tb_reset_jump_recursive2(tb, 1);
1131}
1132
1fddef4b 1133#if defined(TARGET_HAS_ICE)
d720b93d
FB
1134static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1135{
9b3c35e0
JM
1136 target_phys_addr_t addr;
1137 target_ulong pd;
c2f07f81
PB
1138 ram_addr_t ram_addr;
1139 PhysPageDesc *p;
d720b93d 1140
c2f07f81
PB
1141 addr = cpu_get_phys_page_debug(env, pc);
1142 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1143 if (!p) {
1144 pd = IO_MEM_UNASSIGNED;
1145 } else {
1146 pd = p->phys_offset;
1147 }
1148 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1149 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1150}
c27004ec 1151#endif
d720b93d 1152
6658ffb8
PB
1153/* Add a watchpoint. */
1154int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1155{
1156 int i;
1157
1158 for (i = 0; i < env->nb_watchpoints; i++) {
1159 if (addr == env->watchpoint[i].vaddr)
1160 return 0;
1161 }
1162 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1163 return -1;
1164
1165 i = env->nb_watchpoints++;
1166 env->watchpoint[i].vaddr = addr;
1167 tlb_flush_page(env, addr);
1168 /* FIXME: This flush is needed because of the hack to make memory ops
1169 terminate the TB. It can be removed once the proper IO trap and
1170 re-execute bits are in. */
1171 tb_flush(env);
1172 return i;
1173}
1174
1175/* Remove a watchpoint. */
1176int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1177{
1178 int i;
1179
1180 for (i = 0; i < env->nb_watchpoints; i++) {
1181 if (addr == env->watchpoint[i].vaddr) {
1182 env->nb_watchpoints--;
1183 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1184 tlb_flush_page(env, addr);
1185 return 0;
1186 }
1187 }
1188 return -1;
1189}
1190
7d03f82f
EI
1191/* Remove all watchpoints. */
1192void cpu_watchpoint_remove_all(CPUState *env) {
1193 int i;
1194
1195 for (i = 0; i < env->nb_watchpoints; i++) {
1196 tlb_flush_page(env, env->watchpoint[i].vaddr);
1197 }
1198 env->nb_watchpoints = 0;
1199}
1200
c33a346e
FB
1201/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1202 breakpoint is reached */
2e12669a 1203int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1204{
1fddef4b 1205#if defined(TARGET_HAS_ICE)
4c3a88a2 1206 int i;
3b46e624 1207
4c3a88a2
FB
1208 for(i = 0; i < env->nb_breakpoints; i++) {
1209 if (env->breakpoints[i] == pc)
1210 return 0;
1211 }
1212
1213 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1214 return -1;
1215 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1216
d720b93d 1217 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1218 return 0;
1219#else
1220 return -1;
1221#endif
1222}
1223
7d03f82f
EI
1224/* remove all breakpoints */
1225void cpu_breakpoint_remove_all(CPUState *env) {
1226#if defined(TARGET_HAS_ICE)
1227 int i;
1228 for(i = 0; i < env->nb_breakpoints; i++) {
1229 breakpoint_invalidate(env, env->breakpoints[i]);
1230 }
1231 env->nb_breakpoints = 0;
1232#endif
1233}
1234
4c3a88a2 1235/* remove a breakpoint */
2e12669a 1236int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1237{
1fddef4b 1238#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1239 int i;
1240 for(i = 0; i < env->nb_breakpoints; i++) {
1241 if (env->breakpoints[i] == pc)
1242 goto found;
1243 }
1244 return -1;
1245 found:
4c3a88a2 1246 env->nb_breakpoints--;
1fddef4b
FB
1247 if (i < env->nb_breakpoints)
1248 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1249
1250 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1251 return 0;
1252#else
1253 return -1;
1254#endif
1255}
1256
c33a346e
FB
1257/* enable or disable single step mode. EXCP_DEBUG is returned by the
1258 CPU loop after each instruction */
1259void cpu_single_step(CPUState *env, int enabled)
1260{
1fddef4b 1261#if defined(TARGET_HAS_ICE)
c33a346e
FB
1262 if (env->singlestep_enabled != enabled) {
1263 env->singlestep_enabled = enabled;
1264 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1265 /* XXX: only flush what is necessary */
0124311e 1266 tb_flush(env);
c33a346e
FB
1267 }
1268#endif
1269}
1270
34865134
FB
1271/* enable or disable low-level logging */
1272void cpu_set_log(int log_flags)
1273{
1274 loglevel = log_flags;
1275 if (loglevel && !logfile) {
11fcfab4 1276 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1277 if (!logfile) {
1278 perror(logfilename);
1279 _exit(1);
1280 }
9fa3e853
FB
1281#if !defined(CONFIG_SOFTMMU)
1282 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1283 {
1284 static uint8_t logfile_buf[4096];
1285 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1286 }
1287#else
34865134 1288 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1289#endif
e735b91c
PB
1290 log_append = 1;
1291 }
1292 if (!loglevel && logfile) {
1293 fclose(logfile);
1294 logfile = NULL;
34865134
FB
1295 }
1296}
1297
1298void cpu_set_log_filename(const char *filename)
1299{
1300 logfilename = strdup(filename);
e735b91c
PB
1301 if (logfile) {
1302 fclose(logfile);
1303 logfile = NULL;
1304 }
1305 cpu_set_log(loglevel);
34865134 1306}
c33a346e 1307
0124311e 1308/* mask must never be zero, except for A20 change call */
68a79315 1309void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1310{
1311 TranslationBlock *tb;
15a51156 1312 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1313
68a79315 1314 env->interrupt_request |= mask;
ea041c0e
FB
1315 /* if the cpu is currently executing code, we must unlink it and
1316 all the potentially executing TB */
1317 tb = env->current_tb;
ee8b7021
FB
1318 if (tb && !testandset(&interrupt_lock)) {
1319 env->current_tb = NULL;
ea041c0e 1320 tb_reset_jump_recursive(tb);
15a51156 1321 resetlock(&interrupt_lock);
ea041c0e
FB
1322 }
1323}
1324
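/* Note: resetting the chained jumps of the current TB (and, recursively,
   of every TB reachable from it) is what forces an executing CPU back
   into the main cpu_exec() loop, where interrupt_request is finally
   examined; the interrupt itself is only recorded in the flags word above. */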
b54ad049
FB
1325void cpu_reset_interrupt(CPUState *env, int mask)
1326{
1327 env->interrupt_request &= ~mask;
1328}
1329
f193c797 1330CPULogItem cpu_log_items[] = {
5fafdf24 1331 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1332 "show generated host assembly code for each compiled TB" },
1333 { CPU_LOG_TB_IN_ASM, "in_asm",
1334 "show target assembly code for each compiled TB" },
5fafdf24 1335 { CPU_LOG_TB_OP, "op",
57fec1fe 1336 "show micro ops for each compiled TB" },
f193c797 1337 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1338 "show micro ops "
1339#ifdef TARGET_I386
1340 "before eflags optimization and "
f193c797 1341#endif
e01a1157 1342 "after liveness analysis" },
f193c797
FB
1343 { CPU_LOG_INT, "int",
1344 "show interrupts/exceptions in short format" },
1345 { CPU_LOG_EXEC, "exec",
1346 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1347 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1348 "show CPU state before block translation" },
f193c797
FB
1349#ifdef TARGET_I386
1350 { CPU_LOG_PCALL, "pcall",
1351 "show protected mode far calls/returns/exceptions" },
1352#endif
8e3a9fd2 1353#ifdef DEBUG_IOPORT
fd872598
FB
1354 { CPU_LOG_IOPORT, "ioport",
1355 "show all i/o ports accesses" },
8e3a9fd2 1356#endif
f193c797
FB
1357 { 0, NULL, NULL },
1358};
1359
1360static int cmp1(const char *s1, int n, const char *s2)
1361{
1362 if (strlen(s2) != n)
1363 return 0;
1364 return memcmp(s1, s2, n) == 0;
1365}
3b46e624 1366
f193c797
FB
1367/* takes a comma separated list of log masks. Return 0 if error. */
1368int cpu_str_to_log_mask(const char *str)
1369{
1370 CPULogItem *item;
1371 int mask;
1372 const char *p, *p1;
1373
1374 p = str;
1375 mask = 0;
1376 for(;;) {
1377 p1 = strchr(p, ',');
1378 if (!p1)
1379 p1 = p + strlen(p);
8e3a9fd2
FB
1380 if(cmp1(p,p1-p,"all")) {
1381 for(item = cpu_log_items; item->mask != 0; item++) {
1382 mask |= item->mask;
1383 }
1384 } else {
f193c797
FB
1385 for(item = cpu_log_items; item->mask != 0; item++) {
1386 if (cmp1(p, p1 - p, item->name))
1387 goto found;
1388 }
1389 return 0;
8e3a9fd2 1390 }
f193c797
FB
1391 found:
1392 mask |= item->mask;
1393 if (*p1 != ',')
1394 break;
1395 p = p1 + 1;
1396 }
1397 return mask;
1398}
ea041c0e 1399
7501267e
FB
1400void cpu_abort(CPUState *env, const char *fmt, ...)
1401{
1402 va_list ap;
493ae1f0 1403 va_list ap2;
7501267e
FB
1404
1405 va_start(ap, fmt);
493ae1f0 1406 va_copy(ap2, ap);
7501267e
FB
1407 fprintf(stderr, "qemu: fatal: ");
1408 vfprintf(stderr, fmt, ap);
1409 fprintf(stderr, "\n");
1410#ifdef TARGET_I386
7fe48483
FB
1411 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1412#else
1413 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1414#endif
924edcae 1415 if (logfile) {
f9373291 1416 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1417 vfprintf(logfile, fmt, ap2);
f9373291
JM
1418 fprintf(logfile, "\n");
1419#ifdef TARGET_I386
1420 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1421#else
1422 cpu_dump_state(env, logfile, fprintf, 0);
1423#endif
924edcae
AZ
1424 fflush(logfile);
1425 fclose(logfile);
1426 }
493ae1f0 1427 va_end(ap2);
f9373291 1428 va_end(ap);
7501267e
FB
1429 abort();
1430}
1431
c5be9f08
TS
1432CPUState *cpu_copy(CPUState *env)
1433{
01ba9816 1434 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1435 /* preserve chaining and index */
1436 CPUState *next_cpu = new_env->next_cpu;
1437 int cpu_index = new_env->cpu_index;
1438 memcpy(new_env, env, sizeof(CPUState));
1439 new_env->next_cpu = next_cpu;
1440 new_env->cpu_index = cpu_index;
1441 return new_env;
1442}
1443
0124311e
FB
1444#if !defined(CONFIG_USER_ONLY)
1445
5c751e99
EI
1446static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1447{
1448 unsigned int i;
1449
1450 /* Discard jump cache entries for any tb which might potentially
1451 overlap the flushed page. */
1452 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1453 memset (&env->tb_jmp_cache[i], 0,
1454 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1455
1456 i = tb_jmp_cache_hash_page(addr);
1457 memset (&env->tb_jmp_cache[i], 0,
1458 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1459}
1460
ee8b7021
FB
1461/* NOTE: if flush_global is true, also flush global entries (not
1462 implemented yet) */
1463void tlb_flush(CPUState *env, int flush_global)
33417e70 1464{
33417e70 1465 int i;
0124311e 1466
9fa3e853
FB
1467#if defined(DEBUG_TLB)
1468 printf("tlb_flush:\n");
1469#endif
0124311e
FB
1470 /* must reset current TB so that interrupts cannot modify the
1471 links while we are modifying them */
1472 env->current_tb = NULL;
1473
33417e70 1474 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1475 env->tlb_table[0][i].addr_read = -1;
1476 env->tlb_table[0][i].addr_write = -1;
1477 env->tlb_table[0][i].addr_code = -1;
1478 env->tlb_table[1][i].addr_read = -1;
1479 env->tlb_table[1][i].addr_write = -1;
1480 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1481#if (NB_MMU_MODES >= 3)
1482 env->tlb_table[2][i].addr_read = -1;
1483 env->tlb_table[2][i].addr_write = -1;
1484 env->tlb_table[2][i].addr_code = -1;
1485#if (NB_MMU_MODES == 4)
1486 env->tlb_table[3][i].addr_read = -1;
1487 env->tlb_table[3][i].addr_write = -1;
1488 env->tlb_table[3][i].addr_code = -1;
1489#endif
1490#endif
33417e70 1491 }
9fa3e853 1492
8a40a180 1493 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1494
1495#if !defined(CONFIG_SOFTMMU)
1496 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1497#endif
1498#ifdef USE_KQEMU
1499 if (env->kqemu_enabled) {
1500 kqemu_flush(env, flush_global);
1501 }
9fa3e853 1502#endif
e3db7226 1503 tlb_flush_count++;
33417e70
FB
1504}
1505
274da6b2 1506static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1507{
5fafdf24 1508 if (addr == (tlb_entry->addr_read &
84b7b8e7 1509 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1510 addr == (tlb_entry->addr_write &
84b7b8e7 1511 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1512 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1513 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1514 tlb_entry->addr_read = -1;
1515 tlb_entry->addr_write = -1;
1516 tlb_entry->addr_code = -1;
1517 }
61382a50
FB
1518}
1519
2e12669a 1520void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1521{
8a40a180 1522 int i;
0124311e 1523
9fa3e853 1524#if defined(DEBUG_TLB)
108c49b8 1525 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1526#endif
0124311e
FB
1527 /* must reset current TB so that interrupts cannot modify the
1528 links while we are modifying them */
1529 env->current_tb = NULL;
61382a50
FB
1530
1531 addr &= TARGET_PAGE_MASK;
1532 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1533 tlb_flush_entry(&env->tlb_table[0][i], addr);
1534 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1535#if (NB_MMU_MODES >= 3)
1536 tlb_flush_entry(&env->tlb_table[2][i], addr);
1537#if (NB_MMU_MODES == 4)
1538 tlb_flush_entry(&env->tlb_table[3][i], addr);
1539#endif
1540#endif
0124311e 1541
5c751e99 1542 tlb_flush_jmp_cache(env, addr);
9fa3e853 1543
0124311e 1544#if !defined(CONFIG_SOFTMMU)
9fa3e853 1545 if (addr < MMAP_AREA_END)
0124311e 1546 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1547#endif
0a962c02
FB
1548#ifdef USE_KQEMU
1549 if (env->kqemu_enabled) {
1550 kqemu_flush_page(env, addr);
1551 }
1552#endif
9fa3e853
FB
1553}
1554
9fa3e853
FB
1555/* update the TLBs so that writes to code in the virtual page 'addr'
1556 can be detected */
6a00d601 1557static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1558{
5fafdf24 1559 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1560 ram_addr + TARGET_PAGE_SIZE,
1561 CODE_DIRTY_FLAG);
9fa3e853
FB
1562}
1563
9fa3e853 1564/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1565 tested for self modifying code */
5fafdf24 1566static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1567 target_ulong vaddr)
9fa3e853 1568{
3a7d929e 1569 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1570}
1571
5fafdf24 1572static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1573 unsigned long start, unsigned long length)
1574{
1575 unsigned long addr;
84b7b8e7
FB
1576 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1577 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1578 if ((addr - start) < length) {
84b7b8e7 1579 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1580 }
1581 }
1582}
1583
3a7d929e 1584void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1585 int dirty_flags)
1ccde1cb
FB
1586{
1587 CPUState *env;
4f2ac237 1588 unsigned long length, start1;
0a962c02
FB
1589 int i, mask, len;
1590 uint8_t *p;
1ccde1cb
FB
1591
1592 start &= TARGET_PAGE_MASK;
1593 end = TARGET_PAGE_ALIGN(end);
1594
1595 length = end - start;
1596 if (length == 0)
1597 return;
0a962c02 1598 len = length >> TARGET_PAGE_BITS;
3a7d929e 1599#ifdef USE_KQEMU
6a00d601
FB
1600 /* XXX: should not depend on cpu context */
1601 env = first_cpu;
3a7d929e 1602 if (env->kqemu_enabled) {
f23db169
FB
1603 ram_addr_t addr;
1604 addr = start;
1605 for(i = 0; i < len; i++) {
1606 kqemu_set_notdirty(env, addr);
1607 addr += TARGET_PAGE_SIZE;
1608 }
3a7d929e
FB
1609 }
1610#endif
f23db169
FB
1611 mask = ~dirty_flags;
1612 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1613 for(i = 0; i < len; i++)
1614 p[i] &= mask;
1615
1ccde1cb
FB
1616 /* we modify the TLB cache so that the dirty bit will be set again
1617 when accessing the range */
59817ccb 1618 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1619 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1620 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1621 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1622 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1623 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1624#if (NB_MMU_MODES >= 3)
1625 for(i = 0; i < CPU_TLB_SIZE; i++)
1626 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1627#if (NB_MMU_MODES == 4)
1628 for(i = 0; i < CPU_TLB_SIZE; i++)
1629 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1630#endif
1631#endif
6a00d601 1632 }
59817ccb
FB
1633
1634#if !defined(CONFIG_SOFTMMU)
1635 /* XXX: this is expensive */
1636 {
1637 VirtPageDesc *p;
1638 int j;
1639 target_ulong addr;
1640
1641 for(i = 0; i < L1_SIZE; i++) {
1642 p = l1_virt_map[i];
1643 if (p) {
1644 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1645 for(j = 0; j < L2_SIZE; j++) {
1646 if (p->valid_tag == virt_valid_tag &&
1647 p->phys_addr >= start && p->phys_addr < end &&
1648 (p->prot & PROT_WRITE)) {
1649 if (addr < MMAP_AREA_END) {
5fafdf24 1650 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1651 p->prot & ~PROT_WRITE);
1652 }
1653 }
1654 addr += TARGET_PAGE_SIZE;
1655 p++;
1656 }
1657 }
1658 }
1659 }
1660#endif
1ccde1cb
FB
1661}
1662
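/* Note on the round trip implemented here: cpu_physical_memory_reset_dirty()
   above clears the dirty bits and retags the matching RAM entries of every
   CPU TLB as IO_MEM_NOTDIRTY, so the next guest store to such a page is
   forced through the slow I/O path; that slow path sets the dirty bits
   again, and tlb_set_dirty() below switches the entry back to IO_MEM_RAM so
   that subsequent stores use the fast path once more. */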
3a7d929e
FB
1663static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1664{
1665 ram_addr_t ram_addr;
1666
84b7b8e7 1667 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1668 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1669 tlb_entry->addend - (unsigned long)phys_ram_base;
1670 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1671 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1672 }
1673 }
1674}
1675
1676/* update the TLB according to the current state of the dirty bits */
1677void cpu_tlb_update_dirty(CPUState *env)
1678{
1679 int i;
1680 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1681 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1682 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1683 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1684#if (NB_MMU_MODES >= 3)
1685 for(i = 0; i < CPU_TLB_SIZE; i++)
1686 tlb_update_dirty(&env->tlb_table[2][i]);
1687#if (NB_MMU_MODES == 4)
1688 for(i = 0; i < CPU_TLB_SIZE; i++)
1689 tlb_update_dirty(&env->tlb_table[3][i]);
1690#endif
1691#endif
3a7d929e
FB
1692}
1693
5fafdf24 1694static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1695 unsigned long start)
1ccde1cb
FB
1696{
1697 unsigned long addr;
84b7b8e7
FB
1698 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1699 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1700 if (addr == start) {
84b7b8e7 1701 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1702 }
1703 }
1704}
1705
1706/* update the TLB corresponding to virtual page vaddr and phys addr
1707 addr so that it is no longer dirty */
6a00d601
FB
1708static inline void tlb_set_dirty(CPUState *env,
1709 unsigned long addr, target_ulong vaddr)
1ccde1cb 1710{
1ccde1cb
FB
1711 int i;
1712
1ccde1cb
FB
1713 addr &= TARGET_PAGE_MASK;
1714 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1715 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1716 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1717#if (NB_MMU_MODES >= 3)
1718 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1719#if (NB_MMU_MODES == 4)
1720 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1721#endif
1722#endif
9fa3e853
FB
1723}
1724
59817ccb
FB
1725/* add a new TLB entry. At most one entry for a given virtual address
1726 is permitted. Return 0 if OK or 2 if the page could not be mapped
1727 (can only happen in non SOFTMMU mode for I/O pages or pages
1728 conflicting with the host address space). */
5fafdf24
TS
1729int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1730 target_phys_addr_t paddr, int prot,
6ebbf390 1731 int mmu_idx, int is_softmmu)
9fa3e853 1732{
92e873b9 1733 PhysPageDesc *p;
4f2ac237 1734 unsigned long pd;
9fa3e853 1735 unsigned int index;
4f2ac237 1736 target_ulong address;
108c49b8 1737 target_phys_addr_t addend;
9fa3e853 1738 int ret;
84b7b8e7 1739 CPUTLBEntry *te;
6658ffb8 1740 int i;
9fa3e853 1741
92e873b9 1742 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1743 if (!p) {
1744 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1745 } else {
1746 pd = p->phys_offset;
9fa3e853
FB
1747 }
1748#if defined(DEBUG_TLB)
6ebbf390
JM
1749 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1750 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1751#endif
1752
1753 ret = 0;
1754#if !defined(CONFIG_SOFTMMU)
5fafdf24 1755 if (is_softmmu)
9fa3e853
FB
1756#endif
1757 {
2a4188a3 1758 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1759 /* IO memory case */
1760 address = vaddr | pd;
1761 addend = paddr;
1762 } else {
1763 /* standard memory */
1764 address = vaddr;
1765 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1766 }
6658ffb8
PB
1767
1768 /* Make accesses to pages with watchpoints go via the
1769 watchpoint trap routines. */
1770 for (i = 0; i < env->nb_watchpoints; i++) {
1771 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1772 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1773 env->watchpoint[i].addend = 0;
6658ffb8
PB
1774 address = vaddr | io_mem_watch;
1775 } else {
d79acba4
AZ
1776 env->watchpoint[i].addend = pd - paddr +
1777 (unsigned long) phys_ram_base;
6658ffb8
PB
1778 /* TODO: Figure out how to make read watchpoints coexist
1779 with code. */
1780 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1781 }
1782 }
1783 }
d79acba4 1784
90f18422 1785 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1786 addend -= vaddr;
6ebbf390 1787 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1788 te->addend = addend;
67b915a5 1789 if (prot & PAGE_READ) {
84b7b8e7
FB
1790 te->addr_read = address;
1791 } else {
1792 te->addr_read = -1;
1793 }
5c751e99 1794
84b7b8e7
FB
1795 if (prot & PAGE_EXEC) {
1796 te->addr_code = address;
9fa3e853 1797 } else {
84b7b8e7 1798 te->addr_code = -1;
9fa3e853 1799 }
67b915a5 1800 if (prot & PAGE_WRITE) {
5fafdf24 1801 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1802 (pd & IO_MEM_ROMD)) {
1803 /* write access calls the I/O callback */
5fafdf24 1804 te->addr_write = vaddr |
856074ec 1805 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1806 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1807 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1808 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1809 } else {
84b7b8e7 1810 te->addr_write = address;
9fa3e853
FB
1811 }
1812 } else {
84b7b8e7 1813 te->addr_write = -1;
9fa3e853
FB
1814 }
1815 }
1816#if !defined(CONFIG_SOFTMMU)
1817 else {
1818 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1819 /* IO access: no mapping is done as it will be handled by the
1820 soft MMU */
1821 if (!(env->hflags & HF_SOFTMMU_MASK))
1822 ret = 2;
1823 } else {
1824 void *map_addr;
59817ccb
FB
1825
1826 if (vaddr >= MMAP_AREA_END) {
1827 ret = 2;
1828 } else {
1829 if (prot & PROT_WRITE) {
5fafdf24 1830 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1831#if defined(TARGET_HAS_SMC) || 1
59817ccb 1832 first_tb ||
d720b93d 1833#endif
5fafdf24 1834 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1835 !cpu_physical_memory_is_dirty(pd))) {
1836 /* ROM: we behave as if code were inside */
1837 /* if code is present, we only map it read-only and save the
1838 original mapping */
1839 VirtPageDesc *vp;
3b46e624 1840
90f18422 1841 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1842 vp->phys_addr = pd;
1843 vp->prot = prot;
1844 vp->valid_tag = virt_valid_tag;
1845 prot &= ~PAGE_WRITE;
1846 }
1847 }
5fafdf24 1848 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1849 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1850 if (map_addr == MAP_FAILED) {
1851 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1852 paddr, vaddr);
9fa3e853 1853 }
9fa3e853
FB
1854 }
1855 }
1856 }
1857#endif
1858 return ret;
1859}
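/* Illustrative sketch (not part of the original file): how a target's MMU
   fault handler typically feeds tlb_set_page_exec().  The identity
   translation and the fixed protection bits below are hypothetical; a real
   target derives paddr and prot from its page tables before calling. */
#if 0
static int example_tlb_fill(CPUState *env, target_ulong vaddr,
                            int mmu_idx, int is_softmmu)
{
    /* assumed 1:1 mapping and full access, purely for the example */
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* at most one TLB entry per virtual address; returns 0 on success */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr, prot, mmu_idx, is_softmmu);
}
#endif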
1860
1861/* called from signal handler: invalidate the code and unprotect the
1862 page. Return TRUE if the fault was successfully handled. */
53a5960a 1863int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1864{
1865#if !defined(CONFIG_SOFTMMU)
1866 VirtPageDesc *vp;
1867
1868#if defined(DEBUG_TLB)
1869 printf("page_unprotect: addr=0x%08x\n", addr);
1870#endif
1871 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1872
1873 /* if it is not mapped, no need to worry here */
1874 if (addr >= MMAP_AREA_END)
1875 return 0;
9fa3e853
FB
1876 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1877 if (!vp)
1878 return 0;
1879 /* NOTE: in this case, validate_tag is _not_ tested as it
1880 validates only the code TLB */
1881 if (vp->valid_tag != virt_valid_tag)
1882 return 0;
1883 if (!(vp->prot & PAGE_WRITE))
1884 return 0;
1885#if defined(DEBUG_TLB)
5fafdf24 1886 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1887 addr, vp->phys_addr, vp->prot);
1888#endif
59817ccb
FB
1889 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1890 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1891 (unsigned long)addr, vp->prot);
d720b93d 1892 /* set the dirty bit */
0a962c02 1893 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1894 /* flush the code inside */
1895 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1896 return 1;
1897#else
1898 return 0;
1899#endif
33417e70
FB
1900}
1901
0124311e
FB
1902#else
1903
ee8b7021 1904void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1905{
1906}
1907
2e12669a 1908void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1909{
1910}
1911
5fafdf24
TS
1912int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1913 target_phys_addr_t paddr, int prot,
6ebbf390 1914 int mmu_idx, int is_softmmu)
9fa3e853
FB
1915{
1916 return 0;
1917}
0124311e 1918
9fa3e853
FB
1919/* dump memory mappings */
1920void page_dump(FILE *f)
33417e70 1921{
9fa3e853
FB
1922 unsigned long start, end;
1923 int i, j, prot, prot1;
1924 PageDesc *p;
33417e70 1925
9fa3e853
FB
1926 fprintf(f, "%-8s %-8s %-8s %s\n",
1927 "start", "end", "size", "prot");
1928 start = -1;
1929 end = -1;
1930 prot = 0;
1931 for(i = 0; i <= L1_SIZE; i++) {
1932 if (i < L1_SIZE)
1933 p = l1_map[i];
1934 else
1935 p = NULL;
1936 for(j = 0;j < L2_SIZE; j++) {
1937 if (!p)
1938 prot1 = 0;
1939 else
1940 prot1 = p[j].flags;
1941 if (prot1 != prot) {
1942 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1943 if (start != -1) {
1944 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1945 start, end, end - start,
9fa3e853
FB
1946 prot & PAGE_READ ? 'r' : '-',
1947 prot & PAGE_WRITE ? 'w' : '-',
1948 prot & PAGE_EXEC ? 'x' : '-');
1949 }
1950 if (prot1 != 0)
1951 start = end;
1952 else
1953 start = -1;
1954 prot = prot1;
1955 }
1956 if (!p)
1957 break;
1958 }
33417e70 1959 }
33417e70
FB
1960}
1961
53a5960a 1962int page_get_flags(target_ulong address)
33417e70 1963{
9fa3e853
FB
1964 PageDesc *p;
1965
1966 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1967 if (!p)
9fa3e853
FB
1968 return 0;
1969 return p->flags;
1970}
1971
1972/* modify the flags of a page and invalidate the code if
1973 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1974 depending on PAGE_WRITE */
53a5960a 1975void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1976{
1977 PageDesc *p;
53a5960a 1978 target_ulong addr;
9fa3e853
FB
1979
1980 start = start & TARGET_PAGE_MASK;
1981 end = TARGET_PAGE_ALIGN(end);
1982 if (flags & PAGE_WRITE)
1983 flags |= PAGE_WRITE_ORG;
1984 spin_lock(&tb_lock);
1985 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1986 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1987 /* if the write protection is set, then we invalidate the code
1988 inside */
5fafdf24 1989 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1990 (flags & PAGE_WRITE) &&
1991 p->first_tb) {
d720b93d 1992 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1993 }
1994 p->flags = flags;
1995 }
1996 spin_unlock(&tb_lock);
33417e70
FB
1997}
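/* Illustrative sketch (not part of the original file): how a user-mode
   mmap() emulation would record freshly mapped pages with page_set_flags().
   The range and the protection translation are hypothetical. */
#if 0
static void example_record_mmap(target_ulong start, target_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;    /* PAGE_WRITE_ORG is added automatically */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}
#endif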
1998
3d97b40b
TS
1999int page_check_range(target_ulong start, target_ulong len, int flags)
2000{
2001 PageDesc *p;
2002 target_ulong end;
2003 target_ulong addr;
2004
2005 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2006 start = start & TARGET_PAGE_MASK;
2007
2008 if( end < start )
2009 /* we've wrapped around */
2010 return -1;
2011 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2012 p = page_find(addr >> TARGET_PAGE_BITS);
2013 if( !p )
2014 return -1;
2015 if( !(p->flags & PAGE_VALID) )
2016 return -1;
2017
dae3270c 2018 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2019 return -1;
dae3270c
FB
2020 if (flags & PAGE_WRITE) {
2021 if (!(p->flags & PAGE_WRITE_ORG))
2022 return -1;
2023 /* unprotect the page if it was put read-only because it
2024 contains translated code */
2025 if (!(p->flags & PAGE_WRITE)) {
2026 if (!page_unprotect(addr, 0, NULL))
2027 return -1;
2028 }
2029 return 0;
2030 }
3d97b40b
TS
2031 }
2032 return 0;
2033}
2034
9fa3e853
FB
2035/* called from signal handler: invalidate the code and unprotect the
2036 page. Return TRUE if the fault was successfully handled. */
53a5960a 2037int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2038{
2039 unsigned int page_index, prot, pindex;
2040 PageDesc *p, *p1;
53a5960a 2041 target_ulong host_start, host_end, addr;
9fa3e853 2042
83fb7adf 2043 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2044 page_index = host_start >> TARGET_PAGE_BITS;
2045 p1 = page_find(page_index);
2046 if (!p1)
2047 return 0;
83fb7adf 2048 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2049 p = p1;
2050 prot = 0;
2051 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2052 prot |= p->flags;
2053 p++;
2054 }
2055 /* if the page was really writable, then we change its
2056 protection back to writable */
2057 if (prot & PAGE_WRITE_ORG) {
2058 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2059 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2060 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2061 (prot & PAGE_BITS) | PAGE_WRITE);
2062 p1[pindex].flags |= PAGE_WRITE;
2063 /* and since the content will be modified, we must invalidate
2064 the corresponding translated code. */
d720b93d 2065 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2066#ifdef DEBUG_TB_CHECK
2067 tb_invalidate_check(address);
2068#endif
2069 return 1;
2070 }
2071 }
2072 return 0;
2073}
2074
6a00d601
FB
2075static inline void tlb_set_dirty(CPUState *env,
2076 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2077{
2078}
9fa3e853
FB
2079#endif /* defined(CONFIG_USER_ONLY) */
2080
db7b5426 2081static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2082 ram_addr_t memory);
2083static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2084 ram_addr_t orig_memory);
db7b5426
BS
2085#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2086 need_subpage) \
2087 do { \
2088 if (addr > start_addr) \
2089 start_addr2 = 0; \
2090 else { \
2091 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2092 if (start_addr2 > 0) \
2093 need_subpage = 1; \
2094 } \
2095 \
49e9fba2 2096 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2097 end_addr2 = TARGET_PAGE_SIZE - 1; \
2098 else { \
2099 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2100 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2101 need_subpage = 1; \
2102 } \
2103 } while (0)
2104
33417e70
FB
2105/* register physical memory. 'size' must be a multiple of the target
2106 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2107 io memory page */
5fafdf24 2108void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2109 ram_addr_t size,
2110 ram_addr_t phys_offset)
33417e70 2111{
108c49b8 2112 target_phys_addr_t addr, end_addr;
92e873b9 2113 PhysPageDesc *p;
9d42037b 2114 CPUState *env;
00f82b8a 2115 ram_addr_t orig_size = size;
db7b5426 2116 void *subpage;
33417e70 2117
5fd386f6 2118 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2119 end_addr = start_addr + (target_phys_addr_t)size;
2120 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2121 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2122 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2123 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2124 target_phys_addr_t start_addr2, end_addr2;
2125 int need_subpage = 0;
2126
2127 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2128 need_subpage);
4254fab8 2129 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2130 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2131 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2132 &p->phys_offset, orig_memory);
2133 } else {
2134 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2135 >> IO_MEM_SHIFT];
2136 }
2137 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2138 } else {
2139 p->phys_offset = phys_offset;
2140 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2141 (phys_offset & IO_MEM_ROMD))
2142 phys_offset += TARGET_PAGE_SIZE;
2143 }
2144 } else {
2145 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2146 p->phys_offset = phys_offset;
2147 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2148 (phys_offset & IO_MEM_ROMD))
2149 phys_offset += TARGET_PAGE_SIZE;
2150 else {
2151 target_phys_addr_t start_addr2, end_addr2;
2152 int need_subpage = 0;
2153
2154 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2155 end_addr2, need_subpage);
2156
4254fab8 2157 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2158 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2159 &p->phys_offset, IO_MEM_UNASSIGNED);
2160 subpage_register(subpage, start_addr2, end_addr2,
2161 phys_offset);
2162 }
2163 }
2164 }
33417e70 2165 }
3b46e624 2166
9d42037b
FB
2167 /* since each CPU stores ram addresses in its TLB cache, we must
2168 reset the modified entries */
2169 /* XXX: slow ! */
2170 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2171 tlb_flush(env, 1);
2172 }
33417e70
FB
2173}
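/* Illustrative sketch (not part of the original file): registering a RAM
   region and then one page of MMIO with cpu_register_physical_memory().
   The guest physical addresses, sizes and the io index are hypothetical;
   the io index would come from cpu_register_io_memory() below. */
#if 0
static void example_map_board_memory(int mmio_io_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x800000);          /* 8 MB */

    /* plain RAM: the low bits of phys_offset are zero (IO_MEM_RAM) */
    cpu_register_physical_memory(0x00000000, 0x800000, ram_offset);

    /* one page of device registers: the low bits select the I/O handlers */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 mmio_io_index);
}
#endif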
2174
ba863458 2175/* XXX: temporary until new memory mapping API */
00f82b8a 2176ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2177{
2178 PhysPageDesc *p;
2179
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2181 if (!p)
2182 return IO_MEM_UNASSIGNED;
2183 return p->phys_offset;
2184}
2185
e9a1ab19 2186/* XXX: better than nothing */
00f82b8a 2187ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2188{
2189 ram_addr_t addr;
7fb4fdcf 2190 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2191 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2192 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2193 abort();
2194 }
2195 addr = phys_ram_alloc_offset;
2196 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2197 return addr;
2198}
2199
2200void qemu_ram_free(ram_addr_t addr)
2201{
2202}
2203
a4193c8a 2204static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2205{
67d3b957 2206#ifdef DEBUG_UNASSIGNED
ab3d1727 2207 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2208#endif
2209#ifdef TARGET_SPARC
6c36d3fa 2210 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2211#elif defined(TARGET_CRIS)
2212 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2213#endif
33417e70
FB
2214 return 0;
2215}
2216
a4193c8a 2217static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2218{
67d3b957 2219#ifdef DEBUG_UNASSIGNED
ab3d1727 2220 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2221#endif
b4f0a316 2222#ifdef TARGET_SPARC
6c36d3fa 2223 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2224#elif defined(TARGET_CRIS)
2225 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2226#endif
33417e70
FB
2227}
2228
2229static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2230 unassigned_mem_readb,
2231 unassigned_mem_readb,
2232 unassigned_mem_readb,
2233};
2234
2235static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2236 unassigned_mem_writeb,
2237 unassigned_mem_writeb,
2238 unassigned_mem_writeb,
2239};
2240
3a7d929e 2241static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2242{
3a7d929e
FB
2243 unsigned long ram_addr;
2244 int dirty_flags;
2245 ram_addr = addr - (unsigned long)phys_ram_base;
2246 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2247 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2248#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2249 tb_invalidate_phys_page_fast(ram_addr, 1);
2250 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2251#endif
3a7d929e 2252 }
c27004ec 2253 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2254#ifdef USE_KQEMU
2255 if (cpu_single_env->kqemu_enabled &&
2256 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2257 kqemu_modify_page(cpu_single_env, ram_addr);
2258#endif
f23db169
FB
2259 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2260 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2261 /* we remove the notdirty callback only if the code has been
2262 flushed */
2263 if (dirty_flags == 0xff)
6a00d601 2264 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2265}
2266
3a7d929e 2267static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2268{
3a7d929e
FB
2269 unsigned long ram_addr;
2270 int dirty_flags;
2271 ram_addr = addr - (unsigned long)phys_ram_base;
2272 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2273 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2274#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2275 tb_invalidate_phys_page_fast(ram_addr, 2);
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2277#endif
3a7d929e 2278 }
c27004ec 2279 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2280#ifdef USE_KQEMU
2281 if (cpu_single_env->kqemu_enabled &&
2282 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2283 kqemu_modify_page(cpu_single_env, ram_addr);
2284#endif
f23db169
FB
2285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2286 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2287 /* we remove the notdirty callback only if the code has been
2288 flushed */
2289 if (dirty_flags == 0xff)
6a00d601 2290 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2291}
2292
3a7d929e 2293static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2294{
3a7d929e
FB
2295 unsigned long ram_addr;
2296 int dirty_flags;
2297 ram_addr = addr - (unsigned long)phys_ram_base;
2298 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2299 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2300#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2301 tb_invalidate_phys_page_fast(ram_addr, 4);
2302 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2303#endif
3a7d929e 2304 }
c27004ec 2305 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2306#ifdef USE_KQEMU
2307 if (cpu_single_env->kqemu_enabled &&
2308 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2309 kqemu_modify_page(cpu_single_env, ram_addr);
2310#endif
f23db169
FB
2311 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2312 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2313 /* we remove the notdirty callback only if the code has been
2314 flushed */
2315 if (dirty_flags == 0xff)
6a00d601 2316 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2317}
2318
3a7d929e 2319static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2320 NULL, /* never used */
2321 NULL, /* never used */
2322 NULL, /* never used */
2323};
2324
1ccde1cb
FB
2325static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2326 notdirty_mem_writeb,
2327 notdirty_mem_writew,
2328 notdirty_mem_writel,
2329};
2330
6658ffb8
PB
2331#if defined(CONFIG_SOFTMMU)
2332/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2333 so these check for a hit then pass through to the normal out-of-line
2334 phys routines. */
2335static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2336{
2337 return ldub_phys(addr);
2338}
2339
2340static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2341{
2342 return lduw_phys(addr);
2343}
2344
2345static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2346{
2347 return ldl_phys(addr);
2348}
2349
2350/* Generate a debug exception if a watchpoint has been hit.
2351 Returns the real physical address of the access. addr will be a host
d79acba4 2352 address in case of a RAM location. */
6658ffb8
PB
2353static target_ulong check_watchpoint(target_phys_addr_t addr)
2354{
2355 CPUState *env = cpu_single_env;
2356 target_ulong watch;
2357 target_ulong retaddr;
2358 int i;
2359
2360 retaddr = addr;
2361 for (i = 0; i < env->nb_watchpoints; i++) {
2362 watch = env->watchpoint[i].vaddr;
2363 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2364 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2365 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2366 cpu_single_env->watchpoint_hit = i + 1;
2367 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2368 break;
2369 }
2370 }
2371 }
2372 return retaddr;
2373}
2374
2375static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2376 uint32_t val)
2377{
2378 addr = check_watchpoint(addr);
2379 stb_phys(addr, val);
2380}
2381
2382static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2383 uint32_t val)
2384{
2385 addr = check_watchpoint(addr);
2386 stw_phys(addr, val);
2387}
2388
2389static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2390 uint32_t val)
2391{
2392 addr = check_watchpoint(addr);
2393 stl_phys(addr, val);
2394}
2395
2396static CPUReadMemoryFunc *watch_mem_read[3] = {
2397 watch_mem_readb,
2398 watch_mem_readw,
2399 watch_mem_readl,
2400};
2401
2402static CPUWriteMemoryFunc *watch_mem_write[3] = {
2403 watch_mem_writeb,
2404 watch_mem_writew,
2405 watch_mem_writel,
2406};
2407#endif
2408
db7b5426
BS
2409static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2410 unsigned int len)
2411{
db7b5426
BS
2412 uint32_t ret;
2413 unsigned int idx;
2414
2415 idx = SUBPAGE_IDX(addr - mmio->base);
2416#if defined(DEBUG_SUBPAGE)
2417 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2418 mmio, len, addr, idx);
2419#endif
3ee89922 2420 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2421
2422 return ret;
2423}
2424
2425static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2426 uint32_t value, unsigned int len)
2427{
db7b5426
BS
2428 unsigned int idx;
2429
2430 idx = SUBPAGE_IDX(addr - mmio->base);
2431#if defined(DEBUG_SUBPAGE)
2432 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2433 mmio, len, addr, idx, value);
2434#endif
3ee89922 2435 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2436}
2437
2438static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2439{
2440#if defined(DEBUG_SUBPAGE)
2441 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2442#endif
2443
2444 return subpage_readlen(opaque, addr, 0);
2445}
2446
2447static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2448 uint32_t value)
2449{
2450#if defined(DEBUG_SUBPAGE)
2451 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2452#endif
2453 subpage_writelen(opaque, addr, value, 0);
2454}
2455
2456static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2457{
2458#if defined(DEBUG_SUBPAGE)
2459 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2460#endif
2461
2462 return subpage_readlen(opaque, addr, 1);
2463}
2464
2465static void subpage_writew (void *opaque, target_phys_addr_t addr,
2466 uint32_t value)
2467{
2468#if defined(DEBUG_SUBPAGE)
2469 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2470#endif
2471 subpage_writelen(opaque, addr, value, 1);
2472}
2473
2474static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2475{
2476#if defined(DEBUG_SUBPAGE)
2477 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2478#endif
2479
2480 return subpage_readlen(opaque, addr, 2);
2481}
2482
2483static void subpage_writel (void *opaque,
2484 target_phys_addr_t addr, uint32_t value)
2485{
2486#if defined(DEBUG_SUBPAGE)
2487 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2488#endif
2489 subpage_writelen(opaque, addr, value, 2);
2490}
2491
2492static CPUReadMemoryFunc *subpage_read[] = {
2493 &subpage_readb,
2494 &subpage_readw,
2495 &subpage_readl,
2496};
2497
2498static CPUWriteMemoryFunc *subpage_write[] = {
2499 &subpage_writeb,
2500 &subpage_writew,
2501 &subpage_writel,
2502};
2503
2504static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2505 ram_addr_t memory)
db7b5426
BS
2506{
2507 int idx, eidx;
4254fab8 2508 unsigned int i;
db7b5426
BS
2509
2510 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2511 return -1;
2512 idx = SUBPAGE_IDX(start);
2513 eidx = SUBPAGE_IDX(end);
2514#if defined(DEBUG_SUBPAGE)
2515 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2516 mmio, start, end, idx, eidx, memory);
2517#endif
2518 memory >>= IO_MEM_SHIFT;
2519 for (; idx <= eidx; idx++) {
4254fab8 2520 for (i = 0; i < 4; i++) {
3ee89922
BS
2521 if (io_mem_read[memory][i]) {
2522 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2523 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2524 }
2525 if (io_mem_write[memory][i]) {
2526 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2527 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2528 }
4254fab8 2529 }
db7b5426
BS
2530 }
2531
2532 return 0;
2533}
2534
00f82b8a
AJ
2535static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2536 ram_addr_t orig_memory)
db7b5426
BS
2537{
2538 subpage_t *mmio;
2539 int subpage_memory;
2540
2541 mmio = qemu_mallocz(sizeof(subpage_t));
2542 if (mmio != NULL) {
2543 mmio->base = base;
2544 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2545#if defined(DEBUG_SUBPAGE)
2546 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2547 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2548#endif
2549 *phys = subpage_memory | IO_MEM_SUBPAGE;
2550 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2551 }
2552
2553 return mmio;
2554}
2555
33417e70
FB
2556static void io_mem_init(void)
2557{
3a7d929e 2558 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2559 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2560 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2561 io_mem_nb = 5;
2562
6658ffb8
PB
2563#if defined(CONFIG_SOFTMMU)
2564 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2565 watch_mem_write, NULL);
2566#endif
1ccde1cb 2567 /* alloc dirty bits array */
0a962c02 2568 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2569 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2570}
2571
2572/* mem_read and mem_write are arrays of functions containing the
2573 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2574 2). Functions can be omitted with a NULL function pointer. The
2575 registered functions may be modified dynamically later.
2576 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2577 modified. If it is zero, a new io zone is allocated. The return
2578 value can be used with cpu_register_physical_memory(). (-1) is
2579 returned on error. */
33417e70
FB
2580int cpu_register_io_memory(int io_index,
2581 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2582 CPUWriteMemoryFunc **mem_write,
2583 void *opaque)
33417e70 2584{
4254fab8 2585 int i, subwidth = 0;
33417e70
FB
2586
2587 if (io_index <= 0) {
b5ff1b31 2588 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2589 return -1;
2590 io_index = io_mem_nb++;
2591 } else {
2592 if (io_index >= IO_MEM_NB_ENTRIES)
2593 return -1;
2594 }
b5ff1b31 2595
33417e70 2596 for(i = 0;i < 3; i++) {
4254fab8
BS
2597 if (!mem_read[i] || !mem_write[i])
2598 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2599 io_mem_read[io_index][i] = mem_read[i];
2600 io_mem_write[io_index][i] = mem_write[i];
2601 }
a4193c8a 2602 io_mem_opaque[io_index] = opaque;
4254fab8 2603 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2604}
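/* Illustrative sketch (not part of the original file): registering a tiny
   MMIO device.  The device state, register behaviour and callbacks are
   hypothetical; only the cpu_register_io_memory() calling convention is
   taken from the comment above. */
#if 0
typedef struct { uint32_t status; } ExampleDevState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->status;                 /* every register reads back status */
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->status = val;
}

/* byte and word accesses omitted: NULL entries mark the region subwidth */
static CPUReadMemoryFunc *example_read[3] = { NULL, NULL, example_readl };
static CPUWriteMemoryFunc *example_write[3] = { NULL, NULL, example_writel };

static int example_register(ExampleDevState *s)
{
    /* io_index 0 asks for a newly allocated I/O zone; the return value is
       passed as phys_offset to cpu_register_physical_memory() */
    return cpu_register_io_memory(0, example_read, example_write, s);
}
#endif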
61382a50 2605
8926b517
FB
2606CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2607{
2608 return io_mem_write[io_index >> IO_MEM_SHIFT];
2609}
2610
2611CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2612{
2613 return io_mem_read[io_index >> IO_MEM_SHIFT];
2614}
2615
13eb76e0
FB
2616/* physical memory access (slow version, mainly for debug) */
2617#if defined(CONFIG_USER_ONLY)
5fafdf24 2618void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2619 int len, int is_write)
2620{
2621 int l, flags;
2622 target_ulong page;
53a5960a 2623 void * p;
13eb76e0
FB
2624
2625 while (len > 0) {
2626 page = addr & TARGET_PAGE_MASK;
2627 l = (page + TARGET_PAGE_SIZE) - addr;
2628 if (l > len)
2629 l = len;
2630 flags = page_get_flags(page);
2631 if (!(flags & PAGE_VALID))
2632 return;
2633 if (is_write) {
2634 if (!(flags & PAGE_WRITE))
2635 return;
579a97f7 2636 /* XXX: this code should not depend on lock_user */
72fb7daa 2637 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2638 /* FIXME - should this return an error rather than just fail? */
2639 return;
72fb7daa
AJ
2640 memcpy(p, buf, l);
2641 unlock_user(p, addr, l);
13eb76e0
FB
2642 } else {
2643 if (!(flags & PAGE_READ))
2644 return;
579a97f7 2645 /* XXX: this code should not depend on lock_user */
72fb7daa 2646 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2647 /* FIXME - should this return an error rather than just fail? */
2648 return;
72fb7daa 2649 memcpy(buf, p, l);
5b257578 2650 unlock_user(p, addr, 0);
13eb76e0
FB
2651 }
2652 len -= l;
2653 buf += l;
2654 addr += l;
2655 }
2656}
8df1cd07 2657
13eb76e0 2658#else
5fafdf24 2659void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2660 int len, int is_write)
2661{
2662 int l, io_index;
2663 uint8_t *ptr;
2664 uint32_t val;
2e12669a
FB
2665 target_phys_addr_t page;
2666 unsigned long pd;
92e873b9 2667 PhysPageDesc *p;
3b46e624 2668
13eb76e0
FB
2669 while (len > 0) {
2670 page = addr & TARGET_PAGE_MASK;
2671 l = (page + TARGET_PAGE_SIZE) - addr;
2672 if (l > len)
2673 l = len;
92e873b9 2674 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2675 if (!p) {
2676 pd = IO_MEM_UNASSIGNED;
2677 } else {
2678 pd = p->phys_offset;
2679 }
3b46e624 2680
13eb76e0 2681 if (is_write) {
3a7d929e 2682 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2683 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2684 /* XXX: could force cpu_single_env to NULL to avoid
2685 potential bugs */
13eb76e0 2686 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2687 /* 32 bit write access */
c27004ec 2688 val = ldl_p(buf);
a4193c8a 2689 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2690 l = 4;
2691 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2692 /* 16 bit write access */
c27004ec 2693 val = lduw_p(buf);
a4193c8a 2694 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2695 l = 2;
2696 } else {
1c213d19 2697 /* 8 bit write access */
c27004ec 2698 val = ldub_p(buf);
a4193c8a 2699 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2700 l = 1;
2701 }
2702 } else {
b448f2f3
FB
2703 unsigned long addr1;
2704 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2705 /* RAM case */
b448f2f3 2706 ptr = phys_ram_base + addr1;
13eb76e0 2707 memcpy(ptr, buf, l);
3a7d929e
FB
2708 if (!cpu_physical_memory_is_dirty(addr1)) {
2709 /* invalidate code */
2710 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2711 /* set dirty bit */
5fafdf24 2712 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2713 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2714 }
13eb76e0
FB
2715 }
2716 } else {
5fafdf24 2717 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2718 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2719 /* I/O case */
2720 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2721 if (l >= 4 && ((addr & 3) == 0)) {
2722 /* 32 bit read access */
a4193c8a 2723 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2724 stl_p(buf, val);
13eb76e0
FB
2725 l = 4;
2726 } else if (l >= 2 && ((addr & 1) == 0)) {
2727 /* 16 bit read access */
a4193c8a 2728 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2729 stw_p(buf, val);
13eb76e0
FB
2730 l = 2;
2731 } else {
1c213d19 2732 /* 8 bit read access */
a4193c8a 2733 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2734 stb_p(buf, val);
13eb76e0
FB
2735 l = 1;
2736 }
2737 } else {
2738 /* RAM case */
5fafdf24 2739 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2740 (addr & ~TARGET_PAGE_MASK);
2741 memcpy(buf, ptr, l);
2742 }
2743 }
2744 len -= l;
2745 buf += l;
2746 addr += l;
2747 }
2748}
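/* Illustrative sketch (not part of the original file): copying a blob into
   guest physical memory through the slow-path accessor.  The destination
   address and buffer are hypothetical. */
#if 0
static void example_copy_to_guest(target_phys_addr_t dest,
                                  const uint8_t *data, int len)
{
    /* is_write = 1: dispatches to RAM (updating dirty bits) or I/O handlers */
    cpu_physical_memory_rw(dest, (uint8_t *)data, len, 1);
}
#endif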
8df1cd07 2749
d0ecd2aa 2750/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2751void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2752 const uint8_t *buf, int len)
2753{
2754 int l;
2755 uint8_t *ptr;
2756 target_phys_addr_t page;
2757 unsigned long pd;
2758 PhysPageDesc *p;
3b46e624 2759
d0ecd2aa
FB
2760 while (len > 0) {
2761 page = addr & TARGET_PAGE_MASK;
2762 l = (page + TARGET_PAGE_SIZE) - addr;
2763 if (l > len)
2764 l = len;
2765 p = phys_page_find(page >> TARGET_PAGE_BITS);
2766 if (!p) {
2767 pd = IO_MEM_UNASSIGNED;
2768 } else {
2769 pd = p->phys_offset;
2770 }
3b46e624 2771
d0ecd2aa 2772 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2773 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2774 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2775 /* do nothing */
2776 } else {
2777 unsigned long addr1;
2778 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2779 /* ROM/RAM case */
2780 ptr = phys_ram_base + addr1;
2781 memcpy(ptr, buf, l);
2782 }
2783 len -= l;
2784 buf += l;
2785 addr += l;
2786 }
2787}
2788
2789
8df1cd07
FB
2790/* warning: addr must be aligned */
2791uint32_t ldl_phys(target_phys_addr_t addr)
2792{
2793 int io_index;
2794 uint8_t *ptr;
2795 uint32_t val;
2796 unsigned long pd;
2797 PhysPageDesc *p;
2798
2799 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2800 if (!p) {
2801 pd = IO_MEM_UNASSIGNED;
2802 } else {
2803 pd = p->phys_offset;
2804 }
3b46e624 2805
5fafdf24 2806 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2807 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2808 /* I/O case */
2809 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2810 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2811 } else {
2812 /* RAM case */
5fafdf24 2813 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2814 (addr & ~TARGET_PAGE_MASK);
2815 val = ldl_p(ptr);
2816 }
2817 return val;
2818}
2819
84b7b8e7
FB
2820/* warning: addr must be aligned */
2821uint64_t ldq_phys(target_phys_addr_t addr)
2822{
2823 int io_index;
2824 uint8_t *ptr;
2825 uint64_t val;
2826 unsigned long pd;
2827 PhysPageDesc *p;
2828
2829 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2830 if (!p) {
2831 pd = IO_MEM_UNASSIGNED;
2832 } else {
2833 pd = p->phys_offset;
2834 }
3b46e624 2835
2a4188a3
FB
2836 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2837 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2838 /* I/O case */
2839 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2840#ifdef TARGET_WORDS_BIGENDIAN
2841 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2842 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2843#else
2844 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2845 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2846#endif
2847 } else {
2848 /* RAM case */
5fafdf24 2849 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2850 (addr & ~TARGET_PAGE_MASK);
2851 val = ldq_p(ptr);
2852 }
2853 return val;
2854}
2855
aab33094
FB
2856/* XXX: optimize */
2857uint32_t ldub_phys(target_phys_addr_t addr)
2858{
2859 uint8_t val;
2860 cpu_physical_memory_read(addr, &val, 1);
2861 return val;
2862}
2863
2864/* XXX: optimize */
2865uint32_t lduw_phys(target_phys_addr_t addr)
2866{
2867 uint16_t val;
2868 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2869 return tswap16(val);
2870}
2871
8df1cd07
FB
2872/* warning: addr must be aligned. The ram page is not marked as dirty
2873 and the code inside is not invalidated. It is useful if the dirty
2874 bits are used to track modified PTEs */
2875void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2876{
2877 int io_index;
2878 uint8_t *ptr;
2879 unsigned long pd;
2880 PhysPageDesc *p;
2881
2882 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2883 if (!p) {
2884 pd = IO_MEM_UNASSIGNED;
2885 } else {
2886 pd = p->phys_offset;
2887 }
3b46e624 2888
3a7d929e 2889 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2890 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2891 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2892 } else {
5fafdf24 2893 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2894 (addr & ~TARGET_PAGE_MASK);
2895 stl_p(ptr, val);
2896 }
2897}
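/* Illustrative sketch (not part of the original file): the typical use of
   stl_phys_notdirty(), i.e. a target MMU updating accessed/dirty bits in a
   guest page table entry.  The PTE bit positions are hypothetical. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr, uint32_t pte)
{
    pte |= (1 << 5) | (1 << 6);   /* hypothetical accessed and dirty bits */
    /* the RAM page is not marked dirty, so dirty-bit based PTE tracking
       still reflects only guest-initiated modifications */
    stl_phys_notdirty(pte_addr, pte);
}
#endif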
2898
bc98a7ef
JM
2899void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2900{
2901 int io_index;
2902 uint8_t *ptr;
2903 unsigned long pd;
2904 PhysPageDesc *p;
2905
2906 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2907 if (!p) {
2908 pd = IO_MEM_UNASSIGNED;
2909 } else {
2910 pd = p->phys_offset;
2911 }
3b46e624 2912
bc98a7ef
JM
2913 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2914 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2915#ifdef TARGET_WORDS_BIGENDIAN
2916 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2917 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2918#else
2919 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2920 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2921#endif
2922 } else {
5fafdf24 2923 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2924 (addr & ~TARGET_PAGE_MASK);
2925 stq_p(ptr, val);
2926 }
2927}
2928
8df1cd07 2929/* warning: addr must be aligned */
8df1cd07
FB
2930void stl_phys(target_phys_addr_t addr, uint32_t val)
2931{
2932 int io_index;
2933 uint8_t *ptr;
2934 unsigned long pd;
2935 PhysPageDesc *p;
2936
2937 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2938 if (!p) {
2939 pd = IO_MEM_UNASSIGNED;
2940 } else {
2941 pd = p->phys_offset;
2942 }
3b46e624 2943
3a7d929e 2944 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2945 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2946 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2947 } else {
2948 unsigned long addr1;
2949 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2950 /* RAM case */
2951 ptr = phys_ram_base + addr1;
2952 stl_p(ptr, val);
3a7d929e
FB
2953 if (!cpu_physical_memory_is_dirty(addr1)) {
2954 /* invalidate code */
2955 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2956 /* set dirty bit */
f23db169
FB
2957 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2958 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2959 }
8df1cd07
FB
2960 }
2961}
2962
aab33094
FB
2963/* XXX: optimize */
2964void stb_phys(target_phys_addr_t addr, uint32_t val)
2965{
2966 uint8_t v = val;
2967 cpu_physical_memory_write(addr, &v, 1);
2968}
2969
2970/* XXX: optimize */
2971void stw_phys(target_phys_addr_t addr, uint32_t val)
2972{
2973 uint16_t v = tswap16(val);
2974 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2975}
2976
2977/* XXX: optimize */
2978void stq_phys(target_phys_addr_t addr, uint64_t val)
2979{
2980 val = tswap64(val);
2981 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2982}
2983
13eb76e0
FB
2984#endif
2985
2986/* virtual memory access for debug */
5fafdf24 2987int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2988 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2989{
2990 int l;
9b3c35e0
JM
2991 target_phys_addr_t phys_addr;
2992 target_ulong page;
13eb76e0
FB
2993
2994 while (len > 0) {
2995 page = addr & TARGET_PAGE_MASK;
2996 phys_addr = cpu_get_phys_page_debug(env, page);
2997 /* if no physical page mapped, return an error */
2998 if (phys_addr == -1)
2999 return -1;
3000 l = (page + TARGET_PAGE_SIZE) - addr;
3001 if (l > len)
3002 l = len;
5fafdf24 3003 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3004 buf, l, is_write);
13eb76e0
FB
3005 len -= l;
3006 buf += l;
3007 addr += l;
3008 }
3009 return 0;
3010}
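/* Illustrative sketch (not part of the original file): reading guest
   virtual memory the way a debugger stub would, page by page through
   cpu_memory_rw_debug().  The address and length are hypothetical. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* is_write = 0; returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif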
3011
e3db7226
FB
3012void dump_exec_info(FILE *f,
3013 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3014{
3015 int i, target_code_size, max_target_code_size;
3016 int direct_jmp_count, direct_jmp2_count, cross_page;
3017 TranslationBlock *tb;
3b46e624 3018
e3db7226
FB
3019 target_code_size = 0;
3020 max_target_code_size = 0;
3021 cross_page = 0;
3022 direct_jmp_count = 0;
3023 direct_jmp2_count = 0;
3024 for(i = 0; i < nb_tbs; i++) {
3025 tb = &tbs[i];
3026 target_code_size += tb->size;
3027 if (tb->size > max_target_code_size)
3028 max_target_code_size = tb->size;
3029 if (tb->page_addr[1] != -1)
3030 cross_page++;
3031 if (tb->tb_next_offset[0] != 0xffff) {
3032 direct_jmp_count++;
3033 if (tb->tb_next_offset[1] != 0xffff) {
3034 direct_jmp2_count++;
3035 }
3036 }
3037 }
3038 /* XXX: avoid using doubles ? */
57fec1fe 3039 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3040 cpu_fprintf(f, "gen code size %ld/%ld\n",
3041 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3042 cpu_fprintf(f, "TB count %d/%d\n",
3043 nb_tbs, code_gen_max_blocks);
5fafdf24 3044 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3045 nb_tbs ? target_code_size / nb_tbs : 0,
3046 max_target_code_size);
5fafdf24 3047 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3048 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3049 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3050 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3051 cross_page,
e3db7226
FB
3052 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3053 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3054 direct_jmp_count,
e3db7226
FB
3055 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3056 direct_jmp2_count,
3057 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3058 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3059 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3060 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3061 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3062 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3063}
3064
5fafdf24 3065#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3066
3067#define MMUSUFFIX _cmmu
3068#define GETPC() NULL
3069#define env cpu_single_env
b769d8fe 3070#define SOFTMMU_CODE_ACCESS
61382a50
FB
3071
3072#define SHIFT 0
3073#include "softmmu_template.h"
3074
3075#define SHIFT 1
3076#include "softmmu_template.h"
3077
3078#define SHIFT 2
3079#include "softmmu_template.h"
3080
3081#define SHIFT 3
3082#include "softmmu_template.h"
3083
3084#undef env
3085
3086#endif