/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

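/* The virtual page descriptors are kept in a two level table: the top
   L1_BITS of a page index select an entry of l1_map, which points to an
   array of L2_SIZE PageDesc structures allocated on demand, and the bottom
   L2_BITS select the PageDesc inside that array. */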
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

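/* Physical pages are described by a separate table rooted at l1_phys_map.
   When the physical address space is wider than 32 bits an extra level of
   indirection is inserted, so the table is two or three levels deep
   depending on TARGET_PHYS_ADDR_SPACE_BITS.  Unpopulated entries read as
   IO_MEM_UNASSIGNED. */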
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

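/* In the page_next[] lists and in the jmp_first/jmp_next[] lists, the two
   low bits of each TranslationBlock pointer are used as a tag: 0 or 1 gives
   the index of the page or jump slot the link belongs to, and 2 marks the
   head of the circular jump list.  The real pointer is recovered by masking
   with ~3, as done in the list walking code above and below. */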
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

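/* Build a bitmap with one bit per byte of the guest page, marking the byte
   ranges covered by the translated blocks of that page.  Writes which fall
   outside any marked range can then be accepted without invalidating the
   page (see tb_invalidate_phys_page_fast below). */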
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

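/* cpu_interrupt() can be called while translated code is executing.  The
   generated code only tests interrupt_request at block boundaries, so the
   jump chain of the currently executing TB is reset here to force a return
   to the main loop as soon as the current block finishes. */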
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    if (logfile) {
        fflush(logfile);
        fclose(logfile);
    }
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

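/* Dirty memory tracking: phys_ram_dirty holds one byte of dirty flags per
   target page.  cpu_physical_memory_reset_dirty() clears the requested
   flags and redirects the write TLB entries of every CPU to IO_MEM_NOTDIRTY,
   so the next write to such a page takes the slow path and can set the
   dirty bits again. */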
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

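/* A software TLB entry keeps separate read/write/code addresses so that each
   access type can behave differently for the same page: RAM pages are
   reached directly through 'addend', while I/O, not-dirty and watchpoint
   pages keep an io_index in the low bits of the address to force the slow
   path. */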
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

0124311e
FB
1768#else
1769
ee8b7021 1770void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1771{
1772}
1773
2e12669a 1774void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1775{
1776}
1777
84b7b8e7
FB
1778int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1779 target_phys_addr_t paddr, int prot,
1780 int is_user, int is_softmmu)
9fa3e853
FB
1781{
1782 return 0;
1783}
0124311e 1784
9fa3e853
FB
1785/* dump memory mappings */
1786void page_dump(FILE *f)
33417e70 1787{
9fa3e853
FB
1788 unsigned long start, end;
1789 int i, j, prot, prot1;
1790 PageDesc *p;
33417e70 1791
9fa3e853
FB
1792 fprintf(f, "%-8s %-8s %-8s %s\n",
1793 "start", "end", "size", "prot");
1794 start = -1;
1795 end = -1;
1796 prot = 0;
1797 for(i = 0; i <= L1_SIZE; i++) {
1798 if (i < L1_SIZE)
1799 p = l1_map[i];
1800 else
1801 p = NULL;
1802 for(j = 0;j < L2_SIZE; j++) {
1803 if (!p)
1804 prot1 = 0;
1805 else
1806 prot1 = p[j].flags;
1807 if (prot1 != prot) {
1808 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1809 if (start != -1) {
1810 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1811 start, end, end - start,
1812 prot & PAGE_READ ? 'r' : '-',
1813 prot & PAGE_WRITE ? 'w' : '-',
1814 prot & PAGE_EXEC ? 'x' : '-');
1815 }
1816 if (prot1 != 0)
1817 start = end;
1818 else
1819 start = -1;
1820 prot = prot1;
1821 }
1822 if (!p)
1823 break;
1824 }
33417e70 1825 }
33417e70
FB
1826}
1827
53a5960a 1828int page_get_flags(target_ulong address)
33417e70 1829{
9fa3e853
FB
1830 PageDesc *p;
1831
1832 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1833 if (!p)
9fa3e853
FB
1834 return 0;
1835 return p->flags;
1836}
1837
1838/* modify the flags of a page and invalidate the code if
1839 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1840 depending on PAGE_WRITE */
53a5960a 1841void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1842{
1843 PageDesc *p;
53a5960a 1844 target_ulong addr;
9fa3e853
FB
1845
1846 start = start & TARGET_PAGE_MASK;
1847 end = TARGET_PAGE_ALIGN(end);
1848 if (flags & PAGE_WRITE)
1849 flags |= PAGE_WRITE_ORG;
1850 spin_lock(&tb_lock);
1851 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1852 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1853 /* if the write protection is set, then we invalidate the code
1854 inside */
1855 if (!(p->flags & PAGE_WRITE) &&
1856 (flags & PAGE_WRITE) &&
1857 p->first_tb) {
d720b93d 1858 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1859 }
1860 p->flags = flags;
1861 }
1862 spin_unlock(&tb_lock);
33417e70
FB
1863}
1864
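/* Minimal usage sketch, with hypothetical values: after the user-mode loader
   maps a guest segment it records the permissions here, so that
   page_get_flags()/page_unprotect() see a consistent view. Because PAGE_WRITE
   is set, PAGE_WRITE_ORG is added automatically and any translated code in
   the range is invalidated. */
static void example_register_guest_segment(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_VALID);
}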
9fa3e853
FB
1865/* called from signal handler: invalidate the code and unprotect the
1866 page. Return TRUE if the fault was successfully handled. */
53a5960a 1867int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1868{
1869 unsigned int page_index, prot, pindex;
1870 PageDesc *p, *p1;
53a5960a 1871 target_ulong host_start, host_end, addr;
9fa3e853 1872
83fb7adf 1873 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1874 page_index = host_start >> TARGET_PAGE_BITS;
1875 p1 = page_find(page_index);
1876 if (!p1)
1877 return 0;
83fb7adf 1878 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1879 p = p1;
1880 prot = 0;
1881 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1882 prot |= p->flags;
1883 p++;
1884 }
1885 /* if the page was really writable, then we change its
1886 protection back to writable */
1887 if (prot & PAGE_WRITE_ORG) {
1888 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1889 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1890 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1891 (prot & PAGE_BITS) | PAGE_WRITE);
1892 p1[pindex].flags |= PAGE_WRITE;
1893 /* and since the content will be modified, we must invalidate
1894 the corresponding translated code. */
d720b93d 1895 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1896#ifdef DEBUG_TB_CHECK
1897 tb_invalidate_check(address);
1898#endif
1899 return 1;
1900 }
1901 }
1902 return 0;
1903}
1904
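/* Hedged usage sketch (not part of exec.c): the host SIGSEGV handler in the
   per-OS signal code is the intended caller. The hypothetical helper below
   only illustrates the contract: a return value of 1 means the fault came
   from our own write-protection of a page holding translated code and the
   faulting instruction can simply be restarted; 0 means the fault must be
   delivered to the guest. */
static int example_handle_write_fault(target_ulong guest_addr,
                                      unsigned long host_pc, void *puc)
{
    /* restores PAGE_WRITE and invalidates any TB derived from the page */
    return page_unprotect(guest_addr, host_pc, puc);
}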
1905/* call this function when system calls directly modify a memory area */
53a5960a
PB
1906/* ??? This should be redundant now that we have lock_user. */
1907void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1908{
53a5960a 1909 target_ulong start, end, addr;
9fa3e853 1910
53a5960a 1911 start = data;
9fa3e853
FB
1912 end = start + data_size;
1913 start &= TARGET_PAGE_MASK;
1914 end = TARGET_PAGE_ALIGN(end);
1915 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1916 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1917 }
1918}
1919
6a00d601
FB
1920static inline void tlb_set_dirty(CPUState *env,
1921 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1922{
1923}
9fa3e853
FB
1924#endif /* defined(CONFIG_USER_ONLY) */
1925
db7b5426
BS
1926static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1927 int memory);
1928static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1929 int orig_memory);
1930#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1931 need_subpage) \
1932 do { \
1933 if (addr > start_addr) \
1934 start_addr2 = 0; \
1935 else { \
1936 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1937 if (start_addr2 > 0) \
1938 need_subpage = 1; \
1939 } \
1940 \
49e9fba2 1941 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1942 end_addr2 = TARGET_PAGE_SIZE - 1; \
1943 else { \
1944 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1945 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1946 need_subpage = 1; \
1947 } \
1948 } while (0)
1949
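/* Worked example (assuming a 4 KB TARGET_PAGE_SIZE): registering a 0x100-byte
   region at start_addr 0x10000040 enters the loop with addr == start_addr, so
   start_addr2 = 0x040 (> 0, need_subpage is set). The remaining size 0x100 is
   smaller than a page, so end_addr2 = (0x10000140 - 1) & 0xfff = 0x13f
   (< 0xfff, need_subpage is set). The region therefore covers subpage bytes
   [0x040, 0x13f] of the page at 0x10000000. */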
33417e70
FB
1950/* register physical memory. 'size' must be a multiple of the target
1951 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1952 io memory page */
2e12669a
FB
1953void cpu_register_physical_memory(target_phys_addr_t start_addr,
1954 unsigned long size,
1955 unsigned long phys_offset)
33417e70 1956{
108c49b8 1957 target_phys_addr_t addr, end_addr;
92e873b9 1958 PhysPageDesc *p;
9d42037b 1959 CPUState *env;
db7b5426
BS
1960 unsigned long orig_size = size;
1961 void *subpage;
33417e70 1962
5fd386f6 1963 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
1964 end_addr = start_addr + (target_phys_addr_t)size;
1965 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
1966 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1967 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
1968 unsigned long orig_memory = p->phys_offset;
1969 target_phys_addr_t start_addr2, end_addr2;
1970 int need_subpage = 0;
1971
1972 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
1973 need_subpage);
1974 if (need_subpage) {
1975 if (!(orig_memory & IO_MEM_SUBPAGE)) {
1976 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1977 &p->phys_offset, orig_memory);
1978 } else {
1979 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
1980 >> IO_MEM_SHIFT];
1981 }
1982 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
1983 } else {
1984 p->phys_offset = phys_offset;
1985 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1986 (phys_offset & IO_MEM_ROMD))
1987 phys_offset += TARGET_PAGE_SIZE;
1988 }
1989 } else {
1990 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1991 p->phys_offset = phys_offset;
1992 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1993 (phys_offset & IO_MEM_ROMD))
1994 phys_offset += TARGET_PAGE_SIZE;
1995 else {
1996 target_phys_addr_t start_addr2, end_addr2;
1997 int need_subpage = 0;
1998
1999 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2000 end_addr2, need_subpage);
2001
2002 if (need_subpage) {
2003 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2004 &p->phys_offset, IO_MEM_UNASSIGNED);
2005 subpage_register(subpage, start_addr2, end_addr2,
2006 phys_offset);
2007 }
2008 }
2009 }
33417e70 2010 }
9d42037b
FB
2011
2012 /* since each CPU stores ram addresses in its TLB cache, we must
2013 reset the modified entries */
2014 /* XXX: slow ! */
2015 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2016 tlb_flush(env, 1);
2017 }
33417e70
FB
2018}
2019
ba863458
FB
2020/* XXX: temporary until new memory mapping API */
2021uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2022{
2023 PhysPageDesc *p;
2024
2025 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2026 if (!p)
2027 return IO_MEM_UNASSIGNED;
2028 return p->phys_offset;
2029}
2030
e9a1ab19
FB
2031/* XXX: better than nothing */
2032ram_addr_t qemu_ram_alloc(unsigned int size)
2033{
2034 ram_addr_t addr;
2035 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2036 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2037 size, phys_ram_size);
2038 abort();
2039 }
2040 addr = phys_ram_alloc_offset;
2041 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2042 return addr;
2043}
2044
2045void qemu_ram_free(ram_addr_t addr)
2046{
2047}
2048
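/* Minimal board-code sketch (hypothetical sizes/addresses): guest RAM is
   carved out of the preallocated pool with qemu_ram_alloc() and made visible
   at guest-physical address 0. The low bits of phys_offset stay IO_MEM_RAM,
   so accesses hit host memory directly instead of an I/O handler. */
static void example_map_main_ram(unsigned int ram_size)
{
    ram_addr_t off = qemu_ram_alloc(ram_size);

    cpu_register_physical_memory(0x00000000, ram_size, off | IO_MEM_RAM);
}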
a4193c8a 2049static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2050{
67d3b957 2051#ifdef DEBUG_UNASSIGNED
6c36d3fa 2052 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
b4f0a316
BS
2053#endif
2054#ifdef TARGET_SPARC
6c36d3fa 2055 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2056#endif
33417e70
FB
2057 return 0;
2058}
2059
a4193c8a 2060static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2061{
67d3b957 2062#ifdef DEBUG_UNASSIGNED
6c36d3fa 2063 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
67d3b957 2064#endif
b4f0a316 2065#ifdef TARGET_SPARC
6c36d3fa 2066 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2067#endif
33417e70
FB
2068}
2069
2070static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2071 unassigned_mem_readb,
2072 unassigned_mem_readb,
2073 unassigned_mem_readb,
2074};
2075
2076static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2077 unassigned_mem_writeb,
2078 unassigned_mem_writeb,
2079 unassigned_mem_writeb,
2080};
2081
3a7d929e 2082static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2083{
3a7d929e
FB
2084 unsigned long ram_addr;
2085 int dirty_flags;
2086 ram_addr = addr - (unsigned long)phys_ram_base;
2087 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2088 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2089#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2090 tb_invalidate_phys_page_fast(ram_addr, 1);
2091 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2092#endif
3a7d929e 2093 }
c27004ec 2094 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2095#ifdef USE_KQEMU
2096 if (cpu_single_env->kqemu_enabled &&
2097 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2098 kqemu_modify_page(cpu_single_env, ram_addr);
2099#endif
f23db169
FB
2100 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2101 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2102 /* we remove the notdirty callback only if the code has been
2103 flushed */
2104 if (dirty_flags == 0xff)
6a00d601 2105 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2106}
2107
3a7d929e 2108static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2109{
3a7d929e
FB
2110 unsigned long ram_addr;
2111 int dirty_flags;
2112 ram_addr = addr - (unsigned long)phys_ram_base;
2113 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2114 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2115#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2116 tb_invalidate_phys_page_fast(ram_addr, 2);
2117 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2118#endif
3a7d929e 2119 }
c27004ec 2120 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2121#ifdef USE_KQEMU
2122 if (cpu_single_env->kqemu_enabled &&
2123 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2124 kqemu_modify_page(cpu_single_env, ram_addr);
2125#endif
f23db169
FB
2126 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2127 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2128 /* we remove the notdirty callback only if the code has been
2129 flushed */
2130 if (dirty_flags == 0xff)
6a00d601 2131 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2132}
2133
3a7d929e 2134static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2135{
3a7d929e
FB
2136 unsigned long ram_addr;
2137 int dirty_flags;
2138 ram_addr = addr - (unsigned long)phys_ram_base;
2139 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2140 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2141#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2142 tb_invalidate_phys_page_fast(ram_addr, 4);
2143 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2144#endif
3a7d929e 2145 }
c27004ec 2146 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2147#ifdef USE_KQEMU
2148 if (cpu_single_env->kqemu_enabled &&
2149 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2150 kqemu_modify_page(cpu_single_env, ram_addr);
2151#endif
f23db169
FB
2152 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2153 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2154 /* we remove the notdirty callback only if the code has been
2155 flushed */
2156 if (dirty_flags == 0xff)
6a00d601 2157 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2158}
2159
3a7d929e 2160static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2161 NULL, /* never used */
2162 NULL, /* never used */
2163 NULL, /* never used */
2164};
2165
1ccde1cb
FB
2166static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2167 notdirty_mem_writeb,
2168 notdirty_mem_writew,
2169 notdirty_mem_writel,
2170};
2171
6658ffb8
PB
2172#if defined(CONFIG_SOFTMMU)
2173/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2174 so these check for a hit, then pass through to the normal out-of-line
2175 phys routines. */
2176static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2177{
2178 return ldub_phys(addr);
2179}
2180
2181static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2182{
2183 return lduw_phys(addr);
2184}
2185
2186static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2187{
2188 return ldl_phys(addr);
2189}
2190
2191/* Generate a debug exception if a watchpoint has been hit.
2192 Returns the real physical address of the access. addr will be a host
d79acba4 2193 address in case of a RAM location. */
6658ffb8
PB
2194static target_ulong check_watchpoint(target_phys_addr_t addr)
2195{
2196 CPUState *env = cpu_single_env;
2197 target_ulong watch;
2198 target_ulong retaddr;
2199 int i;
2200
2201 retaddr = addr;
2202 for (i = 0; i < env->nb_watchpoints; i++) {
2203 watch = env->watchpoint[i].vaddr;
2204 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2205 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2206 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2207 cpu_single_env->watchpoint_hit = i + 1;
2208 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2209 break;
2210 }
2211 }
2212 }
2213 return retaddr;
2214}
2215
2216static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2217 uint32_t val)
2218{
2219 addr = check_watchpoint(addr);
2220 stb_phys(addr, val);
2221}
2222
2223static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2224 uint32_t val)
2225{
2226 addr = check_watchpoint(addr);
2227 stw_phys(addr, val);
2228}
2229
2230static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2231 uint32_t val)
2232{
2233 addr = check_watchpoint(addr);
2234 stl_phys(addr, val);
2235}
2236
2237static CPUReadMemoryFunc *watch_mem_read[3] = {
2238 watch_mem_readb,
2239 watch_mem_readw,
2240 watch_mem_readl,
2241};
2242
2243static CPUWriteMemoryFunc *watch_mem_write[3] = {
2244 watch_mem_writeb,
2245 watch_mem_writew,
2246 watch_mem_writel,
2247};
2248#endif
2249
db7b5426
BS
2250static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2251 unsigned int len)
2252{
2253 CPUReadMemoryFunc **mem_read;
2254 uint32_t ret;
2255 unsigned int idx;
2256
2257 idx = SUBPAGE_IDX(addr - mmio->base);
2258#if defined(DEBUG_SUBPAGE)
2259 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2260 mmio, len, addr, idx);
2261#endif
2262 mem_read = mmio->mem_read[idx];
2263 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2264
2265 return ret;
2266}
2267
2268static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2269 uint32_t value, unsigned int len)
2270{
2271 CPUWriteMemoryFunc **mem_write;
2272 unsigned int idx;
2273
2274 idx = SUBPAGE_IDX(addr - mmio->base);
2275#if defined(DEBUG_SUBPAGE)
2276 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2277 mmio, len, addr, idx, value);
2278#endif
2279 mem_write = mmio->mem_write[idx];
2280 (*mem_write[len])(mmio->opaque[idx], addr, value);
2281}
2282
2283static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2284{
2285#if defined(DEBUG_SUBPAGE)
2286 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2287#endif
2288
2289 return subpage_readlen(opaque, addr, 0);
2290}
2291
2292static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2293 uint32_t value)
2294{
2295#if defined(DEBUG_SUBPAGE)
2296 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2297#endif
2298 subpage_writelen(opaque, addr, value, 0);
2299}
2300
2301static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2302{
2303#if defined(DEBUG_SUBPAGE)
2304 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2305#endif
2306
2307 return subpage_readlen(opaque, addr, 1);
2308}
2309
2310static void subpage_writew (void *opaque, target_phys_addr_t addr,
2311 uint32_t value)
2312{
2313#if defined(DEBUG_SUBPAGE)
2314 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2315#endif
2316 subpage_writelen(opaque, addr, value, 1);
2317}
2318
2319static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2320{
2321#if defined(DEBUG_SUBPAGE)
2322 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2323#endif
2324
2325 return subpage_readlen(opaque, addr, 2);
2326}
2327
2328static void subpage_writel (void *opaque,
2329 target_phys_addr_t addr, uint32_t value)
2330{
2331#if defined(DEBUG_SUBPAGE)
2332 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2333#endif
2334 subpage_writelen(opaque, addr, value, 2);
2335}
2336
2337static CPUReadMemoryFunc *subpage_read[] = {
2338 &subpage_readb,
2339 &subpage_readw,
2340 &subpage_readl,
2341};
2342
2343static CPUWriteMemoryFunc *subpage_write[] = {
2344 &subpage_writeb,
2345 &subpage_writew,
2346 &subpage_writel,
2347};
2348
2349static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2350 int memory)
2351{
2352 int idx, eidx;
2353
2354 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2355 return -1;
2356 idx = SUBPAGE_IDX(start);
2357 eidx = SUBPAGE_IDX(end);
2358#if defined(DEBUG_SUBPAGE)
2359 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2360 mmio, start, end, idx, eidx, memory);
2361#endif
2362 memory >>= IO_MEM_SHIFT;
2363 for (; idx <= eidx; idx++) {
2364 mmio->mem_read[idx] = io_mem_read[memory];
2365 mmio->mem_write[idx] = io_mem_write[memory];
2366 mmio->opaque[idx] = io_mem_opaque[memory];
2367 }
2368
2369 return 0;
2370}
2371
2372static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2373 int orig_memory)
2374{
2375 subpage_t *mmio;
2376 int subpage_memory;
2377
2378 mmio = qemu_mallocz(sizeof(subpage_t));
2379 if (mmio != NULL) {
2380 mmio->base = base;
2381 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2382#if defined(DEBUG_SUBPAGE)
2383 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2384 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2385#endif
2386 *phys = subpage_memory | IO_MEM_SUBPAGE;
2387 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2388 }
2389
2390 return mmio;
2391}
2392
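/* Usage sketch: callers never touch the subpage machinery directly.
   Registering a region smaller than TARGET_PAGE_SIZE (here a hypothetical
   0x100-byte device at 0x10000040, with dev_io_index obtained from
   cpu_register_io_memory()) makes cpu_register_physical_memory() create a
   subpage for the enclosing page and route only bytes [0x40, 0x13f] to the
   device; the rest of the page keeps its previous mapping. */
static void example_map_small_device(int dev_io_index)
{
    cpu_register_physical_memory(0x10000040, 0x100, dev_io_index);
}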
33417e70
FB
2393static void io_mem_init(void)
2394{
3a7d929e 2395 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2396 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2397 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2398 io_mem_nb = 5;
2399
6658ffb8
PB
2400#if defined(CONFIG_SOFTMMU)
2401 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2402 watch_mem_write, NULL);
2403#endif
1ccde1cb 2404 /* alloc dirty bits array */
0a962c02 2405 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2406 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2407}
2408
2409/* mem_read and mem_write are arrays of functions containing the
2410 function to access byte (index 0), word (index 1) and dword (index
2411 2). All functions must be supplied. If io_index is non-zero, the
2412 corresponding io zone is modified. If it is zero, a new io zone is
2413 allocated. The return value can be used with
2414 cpu_register_physical_memory(). (-1) is returned on error. */
2415int cpu_register_io_memory(int io_index,
2416 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2417 CPUWriteMemoryFunc **mem_write,
2418 void *opaque)
33417e70
FB
2419{
2420 int i;
2421
2422 if (io_index <= 0) {
b5ff1b31 2423 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2424 return -1;
2425 io_index = io_mem_nb++;
2426 } else {
2427 if (io_index >= IO_MEM_NB_ENTRIES)
2428 return -1;
2429 }
b5ff1b31 2430
33417e70
FB
2431 for(i = 0;i < 3; i++) {
2432 io_mem_read[io_index][i] = mem_read[i];
2433 io_mem_write[io_index][i] = mem_write[i];
2434 }
a4193c8a 2435 io_mem_opaque[io_index] = opaque;
33417e70
FB
2436 return io_index << IO_MEM_SHIFT;
2437}
61382a50 2438
8926b517
FB
2439CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2440{
2441 return io_mem_write[io_index >> IO_MEM_SHIFT];
2442}
2443
2444CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2445{
2446 return io_mem_read[io_index >> IO_MEM_SHIFT];
2447}
2448
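/* Hypothetical device registration sketch: one handler per access size
   (byte, word, dword) must be supplied, even if they all share code. Passing
   io_index == 0 allocates a new slot; the returned value is already shifted
   by IO_MEM_SHIFT and can be handed straight to
   cpu_register_physical_memory(). */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                         /* placeholder register contents */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* writes are ignored in this sketch */
}

static CPUReadMemoryFunc *example_dev_read_fn[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};

static CPUWriteMemoryFunc *example_dev_write_fn[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static int example_register_device(void *opaque)
{
    return cpu_register_io_memory(0, example_dev_read_fn,
                                  example_dev_write_fn, opaque);
}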
13eb76e0
FB
2449/* physical memory access (slow version, mainly for debug) */
2450#if defined(CONFIG_USER_ONLY)
2e12669a 2451void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2452 int len, int is_write)
2453{
2454 int l, flags;
2455 target_ulong page;
53a5960a 2456 void * p;
13eb76e0
FB
2457
2458 while (len > 0) {
2459 page = addr & TARGET_PAGE_MASK;
2460 l = (page + TARGET_PAGE_SIZE) - addr;
2461 if (l > len)
2462 l = len;
2463 flags = page_get_flags(page);
2464 if (!(flags & PAGE_VALID))
2465 return;
2466 if (is_write) {
2467 if (!(flags & PAGE_WRITE))
2468 return;
53a5960a
PB
2469 p = lock_user(addr, len, 0);
2470 memcpy(p, buf, len);
2471 unlock_user(p, addr, len);
13eb76e0
FB
2472 } else {
2473 if (!(flags & PAGE_READ))
2474 return;
53a5960a
PB
2475 p = lock_user(addr, len, 1);
2476 memcpy(buf, p, len);
2477 unlock_user(p, addr, 0);
13eb76e0
FB
2478 }
2479 len -= l;
2480 buf += l;
2481 addr += l;
2482 }
2483}
8df1cd07 2484
13eb76e0 2485#else
2e12669a 2486void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2487 int len, int is_write)
2488{
2489 int l, io_index;
2490 uint8_t *ptr;
2491 uint32_t val;
2e12669a
FB
2492 target_phys_addr_t page;
2493 unsigned long pd;
92e873b9 2494 PhysPageDesc *p;
13eb76e0
FB
2495
2496 while (len > 0) {
2497 page = addr & TARGET_PAGE_MASK;
2498 l = (page + TARGET_PAGE_SIZE) - addr;
2499 if (l > len)
2500 l = len;
92e873b9 2501 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2502 if (!p) {
2503 pd = IO_MEM_UNASSIGNED;
2504 } else {
2505 pd = p->phys_offset;
2506 }
2507
2508 if (is_write) {
3a7d929e 2509 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2510 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2511 /* XXX: could force cpu_single_env to NULL to avoid
2512 potential bugs */
13eb76e0 2513 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2514 /* 32 bit write access */
c27004ec 2515 val = ldl_p(buf);
a4193c8a 2516 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2517 l = 4;
2518 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2519 /* 16 bit write access */
c27004ec 2520 val = lduw_p(buf);
a4193c8a 2521 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2522 l = 2;
2523 } else {
1c213d19 2524 /* 8 bit write access */
c27004ec 2525 val = ldub_p(buf);
a4193c8a 2526 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2527 l = 1;
2528 }
2529 } else {
b448f2f3
FB
2530 unsigned long addr1;
2531 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2532 /* RAM case */
b448f2f3 2533 ptr = phys_ram_base + addr1;
13eb76e0 2534 memcpy(ptr, buf, l);
3a7d929e
FB
2535 if (!cpu_physical_memory_is_dirty(addr1)) {
2536 /* invalidate code */
2537 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2538 /* set dirty bit */
f23db169
FB
2539 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2540 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2541 }
13eb76e0
FB
2542 }
2543 } else {
2a4188a3
FB
2544 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2545 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2546 /* I/O case */
2547 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2548 if (l >= 4 && ((addr & 3) == 0)) {
2549 /* 32 bit read access */
a4193c8a 2550 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2551 stl_p(buf, val);
13eb76e0
FB
2552 l = 4;
2553 } else if (l >= 2 && ((addr & 1) == 0)) {
2554 /* 16 bit read access */
a4193c8a 2555 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2556 stw_p(buf, val);
13eb76e0
FB
2557 l = 2;
2558 } else {
1c213d19 2559 /* 8 bit read access */
a4193c8a 2560 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2561 stb_p(buf, val);
13eb76e0
FB
2562 l = 1;
2563 }
2564 } else {
2565 /* RAM case */
2566 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2567 (addr & ~TARGET_PAGE_MASK);
2568 memcpy(buf, ptr, l);
2569 }
2570 }
2571 len -= l;
2572 buf += l;
2573 addr += l;
2574 }
2575}
8df1cd07 2576
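/* Usage sketch (hypothetical device emulation): copy a small block between
   two guest-physical locations through a bounce buffer. The helper above
   splits each access at page boundaries and dispatches MMIO pages through
   io_mem_read/io_mem_write, so the caller never cares what backs the
   addresses. */
static void example_copy_guest_block(target_phys_addr_t dst,
                                     target_phys_addr_t src, int len)
{
    uint8_t tmp[TARGET_PAGE_SIZE];

    while (len > 0) {
        int l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}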
d0ecd2aa
FB
2577/* used for ROM loading : can write in RAM and ROM */
2578void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2579 const uint8_t *buf, int len)
2580{
2581 int l;
2582 uint8_t *ptr;
2583 target_phys_addr_t page;
2584 unsigned long pd;
2585 PhysPageDesc *p;
2586
2587 while (len > 0) {
2588 page = addr & TARGET_PAGE_MASK;
2589 l = (page + TARGET_PAGE_SIZE) - addr;
2590 if (l > len)
2591 l = len;
2592 p = phys_page_find(page >> TARGET_PAGE_BITS);
2593 if (!p) {
2594 pd = IO_MEM_UNASSIGNED;
2595 } else {
2596 pd = p->phys_offset;
2597 }
2598
2599 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2600 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2601 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2602 /* do nothing */
2603 } else {
2604 unsigned long addr1;
2605 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2606 /* ROM/RAM case */
2607 ptr = phys_ram_base + addr1;
2608 memcpy(ptr, buf, l);
2609 }
2610 len -= l;
2611 buf += l;
2612 addr += l;
2613 }
2614}
2615
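/* Caller sketch (the address and size are made up): a board's firmware
   loader copies its image into a region registered as ROM, which the
   ordinary write path above would silently discard. */
static void example_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}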
2616
8df1cd07
FB
2617/* warning: addr must be aligned */
2618uint32_t ldl_phys(target_phys_addr_t addr)
2619{
2620 int io_index;
2621 uint8_t *ptr;
2622 uint32_t val;
2623 unsigned long pd;
2624 PhysPageDesc *p;
2625
2626 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2627 if (!p) {
2628 pd = IO_MEM_UNASSIGNED;
2629 } else {
2630 pd = p->phys_offset;
2631 }
2632
2a4188a3
FB
2633 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2634 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2635 /* I/O case */
2636 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2637 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2638 } else {
2639 /* RAM case */
2640 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2641 (addr & ~TARGET_PAGE_MASK);
2642 val = ldl_p(ptr);
2643 }
2644 return val;
2645}
2646
84b7b8e7
FB
2647/* warning: addr must be aligned */
2648uint64_t ldq_phys(target_phys_addr_t addr)
2649{
2650 int io_index;
2651 uint8_t *ptr;
2652 uint64_t val;
2653 unsigned long pd;
2654 PhysPageDesc *p;
2655
2656 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2657 if (!p) {
2658 pd = IO_MEM_UNASSIGNED;
2659 } else {
2660 pd = p->phys_offset;
2661 }
2662
2a4188a3
FB
2663 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2664 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2665 /* I/O case */
2666 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2667#ifdef TARGET_WORDS_BIGENDIAN
2668 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2669 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2670#else
2671 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2672 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2673#endif
2674 } else {
2675 /* RAM case */
2676 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2677 (addr & ~TARGET_PAGE_MASK);
2678 val = ldq_p(ptr);
2679 }
2680 return val;
2681}
2682
aab33094
FB
2683/* XXX: optimize */
2684uint32_t ldub_phys(target_phys_addr_t addr)
2685{
2686 uint8_t val;
2687 cpu_physical_memory_read(addr, &val, 1);
2688 return val;
2689}
2690
2691/* XXX: optimize */
2692uint32_t lduw_phys(target_phys_addr_t addr)
2693{
2694 uint16_t val;
2695 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2696 return tswap16(val);
2697}
2698
8df1cd07
FB
2699/* warning: addr must be aligned. The ram page is not marked as dirty
2700 and the code inside is not invalidated. It is useful if the dirty
2701 bits are used to track modified PTEs */
2702void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2703{
2704 int io_index;
2705 uint8_t *ptr;
2706 unsigned long pd;
2707 PhysPageDesc *p;
2708
2709 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2710 if (!p) {
2711 pd = IO_MEM_UNASSIGNED;
2712 } else {
2713 pd = p->phys_offset;
2714 }
2715
3a7d929e 2716 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2717 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2718 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2719 } else {
2720 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2721 (addr & ~TARGET_PAGE_MASK);
2722 stl_p(ptr, val);
2723 }
2724}
2725
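/* Hedged sketch of the intended use: a target MMU helper that sets the
   accessed bit in a guest PTE. Using the _notdirty variant keeps the dirty
   bitmap tracking only real guest stores, which matters when those bits are
   themselves used to detect modified page tables. EXAMPLE_PTE_ACCESSED is a
   made-up flag value. */
#define EXAMPLE_PTE_ACCESSED 0x20
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED))
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}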
bc98a7ef
JM
2726void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2727{
2728 int io_index;
2729 uint8_t *ptr;
2730 unsigned long pd;
2731 PhysPageDesc *p;
2732
2733 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2734 if (!p) {
2735 pd = IO_MEM_UNASSIGNED;
2736 } else {
2737 pd = p->phys_offset;
2738 }
2739
2740 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2741 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2742#ifdef TARGET_WORDS_BIGENDIAN
2743 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2744 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2745#else
2746 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2747 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2748#endif
2749 } else {
2750 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2751 (addr & ~TARGET_PAGE_MASK);
2752 stq_p(ptr, val);
2753 }
2754}
2755
8df1cd07 2756/* warning: addr must be aligned */
8df1cd07
FB
2757void stl_phys(target_phys_addr_t addr, uint32_t val)
2758{
2759 int io_index;
2760 uint8_t *ptr;
2761 unsigned long pd;
2762 PhysPageDesc *p;
2763
2764 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2765 if (!p) {
2766 pd = IO_MEM_UNASSIGNED;
2767 } else {
2768 pd = p->phys_offset;
2769 }
2770
3a7d929e 2771 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2772 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2773 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2774 } else {
2775 unsigned long addr1;
2776 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2777 /* RAM case */
2778 ptr = phys_ram_base + addr1;
2779 stl_p(ptr, val);
3a7d929e
FB
2780 if (!cpu_physical_memory_is_dirty(addr1)) {
2781 /* invalidate code */
2782 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2783 /* set dirty bit */
f23db169
FB
2784 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2785 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2786 }
8df1cd07
FB
2787 }
2788}
2789
aab33094
FB
2790/* XXX: optimize */
2791void stb_phys(target_phys_addr_t addr, uint32_t val)
2792{
2793 uint8_t v = val;
2794 cpu_physical_memory_write(addr, &v, 1);
2795}
2796
2797/* XXX: optimize */
2798void stw_phys(target_phys_addr_t addr, uint32_t val)
2799{
2800 uint16_t v = tswap16(val);
2801 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2802}
2803
2804/* XXX: optimize */
2805void stq_phys(target_phys_addr_t addr, uint64_t val)
2806{
2807 val = tswap64(val);
2808 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2809}
2810
13eb76e0
FB
2811#endif
2812
2813/* virtual memory access for debug */
b448f2f3
FB
2814int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2815 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2816{
2817 int l;
9b3c35e0
JM
2818 target_phys_addr_t phys_addr;
2819 target_ulong page;
13eb76e0
FB
2820
2821 while (len > 0) {
2822 page = addr & TARGET_PAGE_MASK;
2823 phys_addr = cpu_get_phys_page_debug(env, page);
2824 /* if no physical page mapped, return an error */
2825 if (phys_addr == -1)
2826 return -1;
2827 l = (page + TARGET_PAGE_SIZE) - addr;
2828 if (l > len)
2829 l = len;
b448f2f3
FB
2830 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2831 buf, l, is_write);
13eb76e0
FB
2832 len -= l;
2833 buf += l;
2834 addr += l;
2835 }
2836 return 0;
2837}
2838
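/* Caller sketch: this is the path the gdb stub takes to read or write guest
   memory by virtual address, translating page by page with
   cpu_get_phys_page_debug() instead of going through the TLB. The wrapper
   below is only illustrative. */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}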
e3db7226
FB
2839void dump_exec_info(FILE *f,
2840 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2841{
2842 int i, target_code_size, max_target_code_size;
2843 int direct_jmp_count, direct_jmp2_count, cross_page;
2844 TranslationBlock *tb;
2845
2846 target_code_size = 0;
2847 max_target_code_size = 0;
2848 cross_page = 0;
2849 direct_jmp_count = 0;
2850 direct_jmp2_count = 0;
2851 for(i = 0; i < nb_tbs; i++) {
2852 tb = &tbs[i];
2853 target_code_size += tb->size;
2854 if (tb->size > max_target_code_size)
2855 max_target_code_size = tb->size;
2856 if (tb->page_addr[1] != -1)
2857 cross_page++;
2858 if (tb->tb_next_offset[0] != 0xffff) {
2859 direct_jmp_count++;
2860 if (tb->tb_next_offset[1] != 0xffff) {
2861 direct_jmp2_count++;
2862 }
2863 }
2864 }
2865 /* XXX: avoid using doubles ? */
2866 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2867 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2868 nb_tbs ? target_code_size / nb_tbs : 0,
2869 max_target_code_size);
2870 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2871 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2872 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2873 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2874 cross_page,
2875 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2876 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2877 direct_jmp_count,
2878 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2879 direct_jmp2_count,
2880 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2881 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2882 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2883 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2884}
2885
61382a50
FB
2886#if !defined(CONFIG_USER_ONLY)
2887
2888#define MMUSUFFIX _cmmu
2889#define GETPC() NULL
2890#define env cpu_single_env
b769d8fe 2891#define SOFTMMU_CODE_ACCESS
61382a50
FB
2892
2893#define SHIFT 0
2894#include "softmmu_template.h"
2895
2896#define SHIFT 1
2897#include "softmmu_template.h"
2898
2899#define SHIFT 2
2900#include "softmmu_template.h"
2901
2902#define SHIFT 3
2903#include "softmmu_template.h"
2904
2905#undef env
2906
2907#endif