/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

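/* Illustrative sketch (not part of the original source): how the tables
   above decompose a page index.  Assuming TARGET_PAGE_BITS == 12,
   L2_BITS == 10 and a 32-bit physical address space, a lookup for
   address 0x12345678 walks l1_phys_map as follows. */
#if 0
static void phys_page_lookup_example(void)
{
    target_phys_addr_t index = 0x12345678 >> TARGET_PAGE_BITS; /* 0x12345 */
    int l1 = (index >> L2_BITS) & (L1_SIZE - 1); /* top bits: 0x48 */
    int l2 = index & (L2_SIZE - 1);              /* low 10 bits: 0x345 */
    /* phys_page_find() returns &l1_phys_map[l1][l2], or NULL if the L2
       table was never allocated (it passes alloc == 0) */
    PhysPageDesc *pd = phys_page_find(index);
    (void)l1; (void)l2; (void)pd;
}
#endif
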
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

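/* Note (added sketch, not in the original): the page and jump lists above
   store a 2-bit tag in the low bits of each TranslationBlock pointer,
   relying on the structures being at least 4-byte aligned.  Tag 0 or 1
   records which of the TB's two possible physical pages the link belongs
   to; tag 2 marks the head/end of a list. */
#if 0
static void tb_tag_example(TranslationBlock *tb)
{
    TranslationBlock *link = (TranslationBlock *)((long)tb | 1);   /* encode */
    unsigned int n = (long)link & 3;                               /* n == 1 */
    TranslationBlock *raw = (TranslationBlock *)((long)link & ~3); /* == tb */
    (void)n; (void)raw;
}
#endif
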
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

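/* Worked example for set_bits() (illustrative, not in the original):
   set_bits(tab, 3, 7) marks bits 3..9.  The range straddles a byte
   boundary, so the first byte gets mask 0xff << 3 == 0xf8 (bits 3..7),
   the middle loop writes no full bytes (end1 == 8), and the tail ORs
   ~(0xff << (10 & 7)) == 0x03 (bits 0..1) into the second byte. */
#if 0
static void set_bits_example(void)
{
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 3, 7);
    /* now tab[0] == 0xf8 and tab[1] == 0x03 */
}
#endif
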
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

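/* Sketch of the fast path above (illustrative, not in the original): for a
   2-byte write at page offset 11, the bitmap test fetches the byte holding
   bit 11 and shifts it so the write's bits land in the low positions.  If
   any of the 'len' low bits are set, translated code overlaps the write
   and the slow invalidation path runs. */
#if 0
static int write_hits_code_example(PageDesc *p)
{
    int offset = 11, len = 2;
    int b = p->code_bitmap[offset >> 3] >> (offset & 7);
    return b & ((1 << len) - 1); /* non-zero: bit 11 or 12 covers code */
}
#endif
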
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

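/* Usage sketch (illustrative, not in the original): tb_find_pc() is what
   lets a host fault handler map a host PC inside the code buffer back to
   the TB being executed, e.g. before calling cpu_restore_state(): */
#if 0
static void host_fault_example(CPUState *env, unsigned long host_pc, void *puc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* host_pc falls between tb->tc_ptr and the next TB's tc_ptr */
        cpu_restore_state(tb, env, host_pc, puc);
    }
}
#endif
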
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

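/* Usage sketch (illustrative, not in the original): parsing a log option
   string.  cpu_str_to_log_mask("in_asm,op") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every entry in
   cpu_log_items, and an unknown name makes the function return 0. */
#if 0
static void log_mask_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask)
        cpu_set_log(mask); /* opens the log file on first use */
}
#endif
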
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

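/* Note (added sketch, not in the original): the TLB is direct-mapped, so a
   single page flush only needs to test one slot per MMU mode.  Assuming
   CPU_TLB_SIZE == 256 and TARGET_PAGE_BITS == 12, address 0x40073000
   selects slot (0x40073000 >> 12) & 0xff == 0x73. */
#if 0
static void tlb_index_example(CPUState *env, target_ulong addr)
{
    int i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr & TARGET_PAGE_MASK);
}
#endif
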
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

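/* Usage sketch (illustrative, not in the original): consumers such as the
   VGA framebuffer refresh scan the dirty bitmap and then re-arm tracking
   for the next pass by clearing their flag, which also flips matching TLB
   write entries back to IO_MEM_NOTDIRTY.  VGA_DIRTY_FLAG is assumed here
   to be one of the per-client dirty flags defined alongside
   CODE_DIRTY_FLAG. */
#if 0
static void dirty_scan_example(ram_addr_t fb_start, ram_addr_t fb_end)
{
    ram_addr_t addr;
    for(addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
}
#endif
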
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

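/* Note (added sketch, not in the original): the entry filled in above is
   consumed by the softmmu load/store helpers.  Roughly, a load hits when
   the page bits of the guest address match addr_read; the host address is
   then guest address + addend.  IO pages never match because their low
   bits carry the io_index, which forces the slow path. */
#if 0
static int tlb_hit_example(CPUState *env, target_ulong addr, int is_user)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[is_user][index];
    if ((addr & TARGET_PAGE_MASK) == (te->addr_read & TARGET_PAGE_MASK)) {
        uint8_t *host = (uint8_t *)(unsigned long)(addr + te->addend);
        (void)host; /* fast path: direct host memory access */
        return 1;
    }
    return 0; /* slow path: I/O access or TLB refill */
}
#endif
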
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

9fa3e853
FB
1769/* dump memory mappings */
1770void page_dump(FILE *f)
33417e70 1771{
9fa3e853
FB
1772 unsigned long start, end;
1773 int i, j, prot, prot1;
1774 PageDesc *p;
33417e70 1775
9fa3e853
FB
1776 fprintf(f, "%-8s %-8s %-8s %s\n",
1777 "start", "end", "size", "prot");
1778 start = -1;
1779 end = -1;
1780 prot = 0;
1781 for(i = 0; i <= L1_SIZE; i++) {
1782 if (i < L1_SIZE)
1783 p = l1_map[i];
1784 else
1785 p = NULL;
1786 for(j = 0;j < L2_SIZE; j++) {
1787 if (!p)
1788 prot1 = 0;
1789 else
1790 prot1 = p[j].flags;
1791 if (prot1 != prot) {
1792 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1793 if (start != -1) {
1794 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1795 start, end, end - start,
1796 prot & PAGE_READ ? 'r' : '-',
1797 prot & PAGE_WRITE ? 'w' : '-',
1798 prot & PAGE_EXEC ? 'x' : '-');
1799 }
1800 if (prot1 != 0)
1801 start = end;
1802 else
1803 start = -1;
1804 prot = prot1;
1805 }
1806 if (!p)
1807 break;
1808 }
33417e70 1809 }
33417e70
FB
1810}
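/* Example of page_dump output (hypothetical user-mode mappings; the
   columns follow the "%08lx-%08lx %08lx %c%c%c" format used above):

   start    end      size     prot
   08048000-08056000 0000e000 r-x
   08056000-08058000 00002000 rw-
   40000000-40016000 00016000 r-x
*/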
1811
53a5960a 1812int page_get_flags(target_ulong address)
33417e70 1813{
9fa3e853
FB
1814 PageDesc *p;
1815
1816 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1817 if (!p)
9fa3e853
FB
1818 return 0;
1819 return p->flags;
1820}
1821
1822/* modify the flags of a page and invalidate the code if
1823 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1824 depending on PAGE_WRITE */
53a5960a 1825void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1826{
1827 PageDesc *p;
53a5960a 1828 target_ulong addr;
9fa3e853
FB
1829
1830 start = start & TARGET_PAGE_MASK;
1831 end = TARGET_PAGE_ALIGN(end);
1832 if (flags & PAGE_WRITE)
1833 flags |= PAGE_WRITE_ORG;
1834 spin_lock(&tb_lock);
1835 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1836 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1837 /* if the write protection is set, then we invalidate the code
1838 inside */
1839 if (!(p->flags & PAGE_WRITE) &&
1840 (flags & PAGE_WRITE) &&
1841 p->first_tb) {
d720b93d 1842 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1843 }
1844 p->flags = flags;
1845 }
1846 spin_unlock(&tb_lock);
33417e70
FB
1847}
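/* Minimal sketch (not part of the original code): how a target mmap
   emulation might use the two helpers above. example_map_region and its
   arguments are hypothetical; the PAGE_* flags come from cpu-all.h. */
#if 0
static void example_map_region(target_ulong start, target_ulong len)
{
    /* mark the range valid and writable; PAGE_WRITE_ORG is added
       automatically by page_set_flags() */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* later, query the protection of a single page */
    if (page_get_flags(start) & PAGE_WRITE) {
        /* writable from the target's point of view */
    }
}
#endif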
1848
9fa3e853
FB
1849/* called from signal handler: invalidate the code and unprotect the
1850 page. Return TRUE if the fault was successfully handled. */
53a5960a 1851int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1852{
1853 unsigned int page_index, prot, pindex;
1854 PageDesc *p, *p1;
53a5960a 1855 target_ulong host_start, host_end, addr;
9fa3e853 1856
83fb7adf 1857 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1858 page_index = host_start >> TARGET_PAGE_BITS;
1859 p1 = page_find(page_index);
1860 if (!p1)
1861 return 0;
83fb7adf 1862 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1863 p = p1;
1864 prot = 0;
1865 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1866 prot |= p->flags;
1867 p++;
1868 }
1869 /* if the page was really writable, then we change its
1870 protection back to writable */
1871 if (prot & PAGE_WRITE_ORG) {
1872 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1873 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1874 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1875 (prot & PAGE_BITS) | PAGE_WRITE);
1876 p1[pindex].flags |= PAGE_WRITE;
1877 /* and since the content will be modified, we must invalidate
1878 the corresponding translated code. */
d720b93d 1879 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1880#ifdef DEBUG_TB_CHECK
1881 tb_invalidate_check(address);
1882#endif
1883 return 1;
1884 }
1885 }
1886 return 0;
1887}
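/* Minimal sketch (an assumption about the caller, not part of this file):
   the host SIGSEGV handler in the user-mode emulator is expected to call
   page_unprotect() roughly like this; example_handle_fault and the h2g()
   conversion are illustrative. */
#if 0
static int example_handle_fault(unsigned long host_addr, unsigned long pc,
                                void *puc)
{
    /* if the fault hit a page that was write-protected to track
       self-modifying code, restore PAGE_WRITE and retry the access */
    if (page_unprotect(h2g(host_addr), pc, puc))
        return 1;   /* fault handled, restart the faulting instruction */
    return 0;       /* genuine fault: deliver it to the target */
}
#endif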
1888
1889/* call this function when system calls directly modify a memory area */
53a5960a
PB
1890/* ??? This should be redundant now that we have lock_user. */
1891void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1892{
53a5960a 1893 target_ulong start, end, addr;
9fa3e853 1894
53a5960a 1895 start = data;
9fa3e853
FB
1896 end = start + data_size;
1897 start &= TARGET_PAGE_MASK;
1898 end = TARGET_PAGE_ALIGN(end);
1899 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1900 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1901 }
1902}
1903
6a00d601
FB
1904static inline void tlb_set_dirty(CPUState *env,
1905 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1906{
1907}
9fa3e853
FB
1908#endif /* defined(CONFIG_USER_ONLY) */
1909
db7b5426
BS
1910static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1911 int memory);
1912static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1913 int orig_memory);
1914#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1915 need_subpage) \
1916 do { \
1917 if (addr > start_addr) \
1918 start_addr2 = 0; \
1919 else { \
1920 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1921 if (start_addr2 > 0) \
1922 need_subpage = 1; \
1923 } \
1924 \
49e9fba2 1925 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1926 end_addr2 = TARGET_PAGE_SIZE - 1; \
1927 else { \
1928 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1929 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1930 need_subpage = 1; \
1931 } \
1932 } while (0)
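/* Worked example (illustrative values, assuming a 0x1000-byte target
   page): registering start_addr = 0x10000200 with orig_size = 0x100.
   For the page at addr = 0x10000000, addr > start_addr is false, so
   start_addr2 = 0x200 and need_subpage becomes 1; the region also ends
   inside that page (0x300 < TARGET_PAGE_SIZE), so end_addr2 = 0x2ff.
   The page is therefore split with subpage_init()/subpage_register()
   below instead of being mapped as a whole. */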
1933
33417e70
FB
1934/* register physical memory. 'size' must be a multiple of the target
1935 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1936 io memory page */
2e12669a
FB
1937void cpu_register_physical_memory(target_phys_addr_t start_addr,
1938 unsigned long size,
1939 unsigned long phys_offset)
33417e70 1940{
108c49b8 1941 target_phys_addr_t addr, end_addr;
92e873b9 1942 PhysPageDesc *p;
9d42037b 1943 CPUState *env;
db7b5426
BS
1944 unsigned long orig_size = size;
1945 void *subpage;
33417e70 1946
5fd386f6 1947 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
1948 end_addr = start_addr + (target_phys_addr_t)size;
1949 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
1950 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1951 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
1952 unsigned long orig_memory = p->phys_offset;
1953 target_phys_addr_t start_addr2, end_addr2;
1954 int need_subpage = 0;
1955
1956 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
1957 need_subpage);
1958 if (need_subpage) {
1959 if (!(orig_memory & IO_MEM_SUBPAGE)) {
1960 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1961 &p->phys_offset, orig_memory);
1962 } else {
1963 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
1964 >> IO_MEM_SHIFT];
1965 }
1966 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
1967 } else {
1968 p->phys_offset = phys_offset;
1969 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1970 (phys_offset & IO_MEM_ROMD))
1971 phys_offset += TARGET_PAGE_SIZE;
1972 }
1973 } else {
1974 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1975 p->phys_offset = phys_offset;
1976 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1977 (phys_offset & IO_MEM_ROMD))
1978 phys_offset += TARGET_PAGE_SIZE;
1979 else {
1980 target_phys_addr_t start_addr2, end_addr2;
1981 int need_subpage = 0;
1982
1983 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
1984 end_addr2, need_subpage);
1985
1986 if (need_subpage) {
1987 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1988 &p->phys_offset, IO_MEM_UNASSIGNED);
1989 subpage_register(subpage, start_addr2, end_addr2,
1990 phys_offset);
1991 }
1992 }
1993 }
33417e70 1994 }
9d42037b
FB
1995
1996 /* since each CPU stores ram addresses in its TLB cache, we must
1997 reset the modified entries */
1998 /* XXX: slow ! */
1999 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2000 tlb_flush(env, 1);
2001 }
33417e70
FB
2002}
2003
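/* Minimal sketch (not part of the original code): how a board model
   typically combines qemu_ram_alloc() below with the registration call
   above. The base address and size are hypothetical. */
#if 0
static void example_register_ram(void)
{
    ram_addr_t ram_offset;

    /* carve 4 MB out of the preallocated physical RAM pool and map it
       at guest physical address 0; RAM offsets are page aligned, so
       oring in IO_MEM_RAM (0) keeps the low bits clear */
    ram_offset = qemu_ram_alloc(4 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 4 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif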
ba863458
FB
2004/* XXX: temporary until new memory mapping API */
2005uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2006{
2007 PhysPageDesc *p;
2008
2009 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2010 if (!p)
2011 return IO_MEM_UNASSIGNED;
2012 return p->phys_offset;
2013}
2014
e9a1ab19
FB
2015/* XXX: better than nothing */
2016ram_addr_t qemu_ram_alloc(unsigned int size)
2017{
2018 ram_addr_t addr;
2019 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2020 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2021 size, phys_ram_size);
2022 abort();
2023 }
2024 addr = phys_ram_alloc_offset;
2025 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2026 return addr;
2027}
2028
2029void qemu_ram_free(ram_addr_t addr)
2030{
2031}
2032
a4193c8a 2033static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2034{
67d3b957 2035#ifdef DEBUG_UNASSIGNED
6c36d3fa 2036 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
b4f0a316
BS
2037#endif
2038#ifdef TARGET_SPARC
6c36d3fa 2039 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2040#endif
33417e70
FB
2041 return 0;
2042}
2043
a4193c8a 2044static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2045{
67d3b957 2046#ifdef DEBUG_UNASSIGNED
6c36d3fa 2047 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
67d3b957 2048#endif
b4f0a316 2049#ifdef TARGET_SPARC
6c36d3fa 2050 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2051#endif
33417e70
FB
2052}
2053
2054static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2055 unassigned_mem_readb,
2056 unassigned_mem_readb,
2057 unassigned_mem_readb,
2058};
2059
2060static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2061 unassigned_mem_writeb,
2062 unassigned_mem_writeb,
2063 unassigned_mem_writeb,
2064};
2065
3a7d929e 2066static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2067{
3a7d929e
FB
2068 unsigned long ram_addr;
2069 int dirty_flags;
2070 ram_addr = addr - (unsigned long)phys_ram_base;
2071 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2072 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2073#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2074 tb_invalidate_phys_page_fast(ram_addr, 1);
2075 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2076#endif
3a7d929e 2077 }
c27004ec 2078 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2079#ifdef USE_KQEMU
2080 if (cpu_single_env->kqemu_enabled &&
2081 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2082 kqemu_modify_page(cpu_single_env, ram_addr);
2083#endif
f23db169
FB
2084 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2085 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2086 /* we remove the notdirty callback only if the code has been
2087 flushed */
2088 if (dirty_flags == 0xff)
6a00d601 2089 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2090}
2091
3a7d929e 2092static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2093{
3a7d929e
FB
2094 unsigned long ram_addr;
2095 int dirty_flags;
2096 ram_addr = addr - (unsigned long)phys_ram_base;
2097 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2098 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2099#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2100 tb_invalidate_phys_page_fast(ram_addr, 2);
2101 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2102#endif
3a7d929e 2103 }
c27004ec 2104 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2105#ifdef USE_KQEMU
2106 if (cpu_single_env->kqemu_enabled &&
2107 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2108 kqemu_modify_page(cpu_single_env, ram_addr);
2109#endif
f23db169
FB
2110 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2111 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2112 /* we remove the notdirty callback only if the code has been
2113 flushed */
2114 if (dirty_flags == 0xff)
6a00d601 2115 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2116}
2117
3a7d929e 2118static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2119{
3a7d929e
FB
2120 unsigned long ram_addr;
2121 int dirty_flags;
2122 ram_addr = addr - (unsigned long)phys_ram_base;
2123 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2124 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2125#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2126 tb_invalidate_phys_page_fast(ram_addr, 4);
2127 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2128#endif
3a7d929e 2129 }
c27004ec 2130 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2131#ifdef USE_KQEMU
2132 if (cpu_single_env->kqemu_enabled &&
2133 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2134 kqemu_modify_page(cpu_single_env, ram_addr);
2135#endif
f23db169
FB
2136 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2137 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2138 /* we remove the notdirty callback only if the code has been
2139 flushed */
2140 if (dirty_flags == 0xff)
6a00d601 2141 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2142}
2143
3a7d929e 2144static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2145 NULL, /* never used */
2146 NULL, /* never used */
2147 NULL, /* never used */
2148};
2149
1ccde1cb
FB
2150static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2151 notdirty_mem_writeb,
2152 notdirty_mem_writew,
2153 notdirty_mem_writel,
2154};
2155
6658ffb8
PB
2156#if defined(CONFIG_SOFTMMU)
2157/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2158 so these check for a hit then pass through to the normal out-of-line
2159 phys routines. */
2160static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2161{
2162 return ldub_phys(addr);
2163}
2164
2165static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2166{
2167 return lduw_phys(addr);
2168}
2169
2170static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2171{
2172 return ldl_phys(addr);
2173}
2174
2175/* Generate a debug exception if a watchpoint has been hit.
2176 Returns the real physical address of the access. addr will be a host
2177 address in the is_ram case. */
2178static target_ulong check_watchpoint(target_phys_addr_t addr)
2179{
2180 CPUState *env = cpu_single_env;
2181 target_ulong watch;
2182 target_ulong retaddr;
2183 int i;
2184
2185 retaddr = addr;
2186 for (i = 0; i < env->nb_watchpoints; i++) {
2187 watch = env->watchpoint[i].vaddr;
2188 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2189 if (env->watchpoint[i].is_ram)
2190 retaddr = addr - (unsigned long)phys_ram_base;
2191 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2192 env->watchpoint_hit = i + 1;
2193 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2194 break;
2195 }
2196 }
2197 }
2198 return retaddr;
2199}
2200
2201static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2202 uint32_t val)
2203{
2204 addr = check_watchpoint(addr);
2205 stb_phys(addr, val);
2206}
2207
2208static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2209 uint32_t val)
2210{
2211 addr = check_watchpoint(addr);
2212 stw_phys(addr, val);
2213}
2214
2215static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2216 uint32_t val)
2217{
2218 addr = check_watchpoint(addr);
2219 stl_phys(addr, val);
2220}
2221
2222static CPUReadMemoryFunc *watch_mem_read[3] = {
2223 watch_mem_readb,
2224 watch_mem_readw,
2225 watch_mem_readl,
2226};
2227
2228static CPUWriteMemoryFunc *watch_mem_write[3] = {
2229 watch_mem_writeb,
2230 watch_mem_writew,
2231 watch_mem_writel,
2232};
2233#endif
2234
db7b5426
BS
2235static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2236 unsigned int len)
2237{
2238 CPUReadMemoryFunc **mem_read;
2239 uint32_t ret;
2240 unsigned int idx;
2241
2242 idx = SUBPAGE_IDX(addr - mmio->base);
2243#if defined(DEBUG_SUBPAGE)
2244 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2245 mmio, len, addr, idx);
2246#endif
2247 mem_read = mmio->mem_read[idx];
2248 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2249
2250 return ret;
2251}
2252
2253static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2254 uint32_t value, unsigned int len)
2255{
2256 CPUWriteMemoryFunc **mem_write;
2257 unsigned int idx;
2258
2259 idx = SUBPAGE_IDX(addr - mmio->base);
2260#if defined(DEBUG_SUBPAGE)
2261 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2262 mmio, len, addr, idx, value);
2263#endif
2264 mem_write = mmio->mem_write[idx];
2265 (*mem_write[len])(mmio->opaque[idx], addr, value);
2266}
2267
2268static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2269{
2270#if defined(DEBUG_SUBPAGE)
2271 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2272#endif
2273
2274 return subpage_readlen(opaque, addr, 0);
2275}
2276
2277static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2278 uint32_t value)
2279{
2280#if defined(DEBUG_SUBPAGE)
2281 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2282#endif
2283 subpage_writelen(opaque, addr, value, 0);
2284}
2285
2286static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2287{
2288#if defined(DEBUG_SUBPAGE)
2289 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2290#endif
2291
2292 return subpage_readlen(opaque, addr, 1);
2293}
2294
2295static void subpage_writew (void *opaque, target_phys_addr_t addr,
2296 uint32_t value)
2297{
2298#if defined(DEBUG_SUBPAGE)
2299 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2300#endif
2301 subpage_writelen(opaque, addr, value, 1);
2302}
2303
2304static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2305{
2306#if defined(DEBUG_SUBPAGE)
2307 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2308#endif
2309
2310 return subpage_readlen(opaque, addr, 2);
2311}
2312
2313static void subpage_writel (void *opaque,
2314 target_phys_addr_t addr, uint32_t value)
2315{
2316#if defined(DEBUG_SUBPAGE)
2317 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2318#endif
2319 subpage_writelen(opaque, addr, value, 2);
2320}
2321
2322static CPUReadMemoryFunc *subpage_read[] = {
2323 &subpage_readb,
2324 &subpage_readw,
2325 &subpage_readl,
2326};
2327
2328static CPUWriteMemoryFunc *subpage_write[] = {
2329 &subpage_writeb,
2330 &subpage_writew,
2331 &subpage_writel,
2332};
2333
2334static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2335 int memory)
2336{
2337 int idx, eidx;
2338
2339 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2340 return -1;
2341 idx = SUBPAGE_IDX(start);
2342 eidx = SUBPAGE_IDX(end);
2343#if defined(DEBUG_SUBPAGE)
2344 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2345 mmio, start, end, idx, eidx, memory);
2346#endif
2347 memory >>= IO_MEM_SHIFT;
2348 for (; idx <= eidx; idx++) {
2349 mmio->mem_read[idx] = io_mem_read[memory];
2350 mmio->mem_write[idx] = io_mem_write[memory];
2351 mmio->opaque[idx] = io_mem_opaque[memory];
2352 }
2353
2354 return 0;
2355}
2356
2357static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2358 int orig_memory)
2359{
2360 subpage_t *mmio;
2361 int subpage_memory;
2362
2363 mmio = qemu_mallocz(sizeof(subpage_t));
2364 if (mmio != NULL) {
2365 mmio->base = base;
2366 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2367#if defined(DEBUG_SUBPAGE)
2368 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2369 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2370#endif
2371 *phys = subpage_memory | IO_MEM_SUBPAGE;
2372 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2373 }
2374
2375 return mmio;
2376}
2377
33417e70
FB
2378static void io_mem_init(void)
2379{
3a7d929e 2380 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2381 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2382 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2383 io_mem_nb = 5;
2384
6658ffb8
PB
2385#if defined(CONFIG_SOFTMMU)
2386 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2387 watch_mem_write, NULL);
2388#endif
1ccde1cb 2389 /* alloc dirty bits array */
0a962c02 2390 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2391 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2392}
2393
2394/* mem_read and mem_write are arrays of functions containing the
2395 function to access byte (index 0), word (index 1) and dword (index
2396 2). All functions must be supplied. If io_index is positive, the
2397 corresponding I/O zone is modified. If it is zero or negative, a new
2398 I/O zone is allocated. The return value can be used with
2399 cpu_register_physical_memory(); -1 is returned on error. */
2400int cpu_register_io_memory(int io_index,
2401 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2402 CPUWriteMemoryFunc **mem_write,
2403 void *opaque)
33417e70
FB
2404{
2405 int i;
2406
2407 if (io_index <= 0) {
b5ff1b31 2408 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2409 return -1;
2410 io_index = io_mem_nb++;
2411 } else {
2412 if (io_index >= IO_MEM_NB_ENTRIES)
2413 return -1;
2414 }
b5ff1b31 2415
33417e70
FB
2416 for(i = 0;i < 3; i++) {
2417 io_mem_read[io_index][i] = mem_read[i];
2418 io_mem_write[io_index][i] = mem_write[i];
2419 }
a4193c8a 2420 io_mem_opaque[io_index] = opaque;
33417e70
FB
2421 return io_index << IO_MEM_SHIFT;
2422}
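/* Minimal sketch (hypothetical device, not part of the original code):
   the three entries of each table serve byte, word and long accesses,
   as described above. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device-specific byte read */
}
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device-specific word read */
}
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device-specific long read */
}
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device-specific byte write */
}
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device-specific word write */
}
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device-specific long write */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_register_device(void)
{
    /* io_index 0 requests a fresh I/O zone; the return value is already
       shifted and can be passed to cpu_register_physical_memory() */
    int idx = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, idx);
}
#endif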
61382a50 2423
8926b517
FB
2424CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2425{
2426 return io_mem_write[io_index >> IO_MEM_SHIFT];
2427}
2428
2429CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2430{
2431 return io_mem_read[io_index >> IO_MEM_SHIFT];
2432}
2433
13eb76e0
FB
2434/* physical memory access (slow version, mainly for debug) */
2435#if defined(CONFIG_USER_ONLY)
2e12669a 2436void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2437 int len, int is_write)
2438{
2439 int l, flags;
2440 target_ulong page;
53a5960a 2441 void * p;
13eb76e0
FB
2442
2443 while (len > 0) {
2444 page = addr & TARGET_PAGE_MASK;
2445 l = (page + TARGET_PAGE_SIZE) - addr;
2446 if (l > len)
2447 l = len;
2448 flags = page_get_flags(page);
2449 if (!(flags & PAGE_VALID))
2450 return;
2451 if (is_write) {
2452 if (!(flags & PAGE_WRITE))
2453 return;
53a5960a
PB
2454 p = lock_user(addr, l, 0);
2455 memcpy(p, buf, l);
2456 unlock_user(p, addr, l);
13eb76e0
FB
2457 } else {
2458 if (!(flags & PAGE_READ))
2459 return;
53a5960a
PB
2460 p = lock_user(addr, l, 1);
2461 memcpy(buf, p, l);
2462 unlock_user(p, addr, 0);
13eb76e0
FB
2463 }
2464 len -= l;
2465 buf += l;
2466 addr += l;
2467 }
2468}
8df1cd07 2469
13eb76e0 2470#else
2e12669a 2471void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2472 int len, int is_write)
2473{
2474 int l, io_index;
2475 uint8_t *ptr;
2476 uint32_t val;
2e12669a
FB
2477 target_phys_addr_t page;
2478 unsigned long pd;
92e873b9 2479 PhysPageDesc *p;
13eb76e0
FB
2480
2481 while (len > 0) {
2482 page = addr & TARGET_PAGE_MASK;
2483 l = (page + TARGET_PAGE_SIZE) - addr;
2484 if (l > len)
2485 l = len;
92e873b9 2486 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2487 if (!p) {
2488 pd = IO_MEM_UNASSIGNED;
2489 } else {
2490 pd = p->phys_offset;
2491 }
2492
2493 if (is_write) {
3a7d929e 2494 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2495 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2496 /* XXX: could force cpu_single_env to NULL to avoid
2497 potential bugs */
13eb76e0 2498 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2499 /* 32 bit write access */
c27004ec 2500 val = ldl_p(buf);
a4193c8a 2501 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2502 l = 4;
2503 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2504 /* 16 bit write access */
c27004ec 2505 val = lduw_p(buf);
a4193c8a 2506 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2507 l = 2;
2508 } else {
1c213d19 2509 /* 8 bit write access */
c27004ec 2510 val = ldub_p(buf);
a4193c8a 2511 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2512 l = 1;
2513 }
2514 } else {
b448f2f3
FB
2515 unsigned long addr1;
2516 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2517 /* RAM case */
b448f2f3 2518 ptr = phys_ram_base + addr1;
13eb76e0 2519 memcpy(ptr, buf, l);
3a7d929e
FB
2520 if (!cpu_physical_memory_is_dirty(addr1)) {
2521 /* invalidate code */
2522 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2523 /* set dirty bit */
f23db169
FB
2524 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2525 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2526 }
13eb76e0
FB
2527 }
2528 } else {
2a4188a3
FB
2529 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2530 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2531 /* I/O case */
2532 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2533 if (l >= 4 && ((addr & 3) == 0)) {
2534 /* 32 bit read access */
a4193c8a 2535 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2536 stl_p(buf, val);
13eb76e0
FB
2537 l = 4;
2538 } else if (l >= 2 && ((addr & 1) == 0)) {
2539 /* 16 bit read access */
a4193c8a 2540 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2541 stw_p(buf, val);
13eb76e0
FB
2542 l = 2;
2543 } else {
1c213d19 2544 /* 8 bit read access */
a4193c8a 2545 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2546 stb_p(buf, val);
13eb76e0
FB
2547 l = 1;
2548 }
2549 } else {
2550 /* RAM case */
2551 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2552 (addr & ~TARGET_PAGE_MASK);
2553 memcpy(buf, ptr, l);
2554 }
2555 }
2556 len -= l;
2557 buf += l;
2558 addr += l;
2559 }
2560}
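/* Minimal sketch (hypothetical address): using the slow accessor above
   to copy a buffer into guest physical memory and read it back. */
#if 0
static void example_phys_copy(void)
{
    uint8_t buf[16];

    memset(buf, 0xab, sizeof(buf));
    /* is_write = 1: store buf at guest physical address 0x1000 */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);
    /* is_write = 0: read the same bytes back */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);
}
#endif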
8df1cd07 2561
d0ecd2aa
FB
2562/* used for ROM loading : can write in RAM and ROM */
2563void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2564 const uint8_t *buf, int len)
2565{
2566 int l;
2567 uint8_t *ptr;
2568 target_phys_addr_t page;
2569 unsigned long pd;
2570 PhysPageDesc *p;
2571
2572 while (len > 0) {
2573 page = addr & TARGET_PAGE_MASK;
2574 l = (page + TARGET_PAGE_SIZE) - addr;
2575 if (l > len)
2576 l = len;
2577 p = phys_page_find(page >> TARGET_PAGE_BITS);
2578 if (!p) {
2579 pd = IO_MEM_UNASSIGNED;
2580 } else {
2581 pd = p->phys_offset;
2582 }
2583
2584 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2585 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2586 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2587 /* do nothing */
2588 } else {
2589 unsigned long addr1;
2590 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2591 /* ROM/RAM case */
2592 ptr = phys_ram_base + addr1;
2593 memcpy(ptr, buf, l);
2594 }
2595 len -= l;
2596 buf += l;
2597 addr += l;
2598 }
2599}
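/* Minimal sketch (hypothetical image and address): a ROM loader can use
   the helper above to initialize a region that is mapped read-only for
   the guest. */
#if 0
static void example_load_bios(const uint8_t *bios_buf, int bios_size)
{
    /* the write succeeds even though the region was registered as ROM */
    cpu_physical_memory_write_rom(0xfffe0000, bios_buf, bios_size);
}
#endif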
2600
2601
8df1cd07
FB
2602/* warning: addr must be aligned */
2603uint32_t ldl_phys(target_phys_addr_t addr)
2604{
2605 int io_index;
2606 uint8_t *ptr;
2607 uint32_t val;
2608 unsigned long pd;
2609 PhysPageDesc *p;
2610
2611 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2612 if (!p) {
2613 pd = IO_MEM_UNASSIGNED;
2614 } else {
2615 pd = p->phys_offset;
2616 }
2617
2a4188a3
FB
2618 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2619 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2620 /* I/O case */
2621 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2622 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2623 } else {
2624 /* RAM case */
2625 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2626 (addr & ~TARGET_PAGE_MASK);
2627 val = ldl_p(ptr);
2628 }
2629 return val;
2630}
2631
84b7b8e7
FB
2632/* warning: addr must be aligned */
2633uint64_t ldq_phys(target_phys_addr_t addr)
2634{
2635 int io_index;
2636 uint8_t *ptr;
2637 uint64_t val;
2638 unsigned long pd;
2639 PhysPageDesc *p;
2640
2641 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2642 if (!p) {
2643 pd = IO_MEM_UNASSIGNED;
2644 } else {
2645 pd = p->phys_offset;
2646 }
2647
2a4188a3
FB
2648 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2649 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2650 /* I/O case */
2651 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2652#ifdef TARGET_WORDS_BIGENDIAN
2653 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2654 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2655#else
2656 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2657 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2658#endif
2659 } else {
2660 /* RAM case */
2661 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2662 (addr & ~TARGET_PAGE_MASK);
2663 val = ldq_p(ptr);
2664 }
2665 return val;
2666}
2667
aab33094
FB
2668/* XXX: optimize */
2669uint32_t ldub_phys(target_phys_addr_t addr)
2670{
2671 uint8_t val;
2672 cpu_physical_memory_read(addr, &val, 1);
2673 return val;
2674}
2675
2676/* XXX: optimize */
2677uint32_t lduw_phys(target_phys_addr_t addr)
2678{
2679 uint16_t val;
2680 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2681 return tswap16(val);
2682}
2683
8df1cd07
FB
2684/* warning: addr must be aligned. The ram page is not marked as dirty
2685 and the code inside is not invalidated. It is useful if the dirty
2686 bits are used to track modified PTEs */
2687void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2688{
2689 int io_index;
2690 uint8_t *ptr;
2691 unsigned long pd;
2692 PhysPageDesc *p;
2693
2694 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2695 if (!p) {
2696 pd = IO_MEM_UNASSIGNED;
2697 } else {
2698 pd = p->phys_offset;
2699 }
2700
3a7d929e 2701 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2702 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2703 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2704 } else {
2705 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2706 (addr & ~TARGET_PAGE_MASK);
2707 stl_p(ptr, val);
2708 }
2709}
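/* Minimal sketch (hypothetical target MMU helper): updating a page table
   entry in place. Using the _notdirty variant keeps the dirty bitmap
   usable for tracking guest PTE modifications, as noted above;
   EXAMPLE_PTE_ACCESSED is a made-up flag. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & EXAMPLE_PTE_ACCESSED))
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}
#endif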
2710
bc98a7ef
JM
2711void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2712{
2713 int io_index;
2714 uint8_t *ptr;
2715 unsigned long pd;
2716 PhysPageDesc *p;
2717
2718 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2719 if (!p) {
2720 pd = IO_MEM_UNASSIGNED;
2721 } else {
2722 pd = p->phys_offset;
2723 }
2724
2725 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2726 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2727#ifdef TARGET_WORDS_BIGENDIAN
2728 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2729 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2730#else
2731 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2732 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2733#endif
2734 } else {
2735 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2736 (addr & ~TARGET_PAGE_MASK);
2737 stq_p(ptr, val);
2738 }
2739}
2740
8df1cd07 2741/* warning: addr must be aligned */
8df1cd07
FB
2742void stl_phys(target_phys_addr_t addr, uint32_t val)
2743{
2744 int io_index;
2745 uint8_t *ptr;
2746 unsigned long pd;
2747 PhysPageDesc *p;
2748
2749 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2750 if (!p) {
2751 pd = IO_MEM_UNASSIGNED;
2752 } else {
2753 pd = p->phys_offset;
2754 }
2755
3a7d929e 2756 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2757 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2758 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2759 } else {
2760 unsigned long addr1;
2761 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2762 /* RAM case */
2763 ptr = phys_ram_base + addr1;
2764 stl_p(ptr, val);
3a7d929e
FB
2765 if (!cpu_physical_memory_is_dirty(addr1)) {
2766 /* invalidate code */
2767 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2768 /* set dirty bit */
f23db169
FB
2769 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2770 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2771 }
8df1cd07
FB
2772 }
2773}
2774
aab33094
FB
2775/* XXX: optimize */
2776void stb_phys(target_phys_addr_t addr, uint32_t val)
2777{
2778 uint8_t v = val;
2779 cpu_physical_memory_write(addr, &v, 1);
2780}
2781
2782/* XXX: optimize */
2783void stw_phys(target_phys_addr_t addr, uint32_t val)
2784{
2785 uint16_t v = tswap16(val);
2786 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2787}
2788
2789/* XXX: optimize */
2790void stq_phys(target_phys_addr_t addr, uint64_t val)
2791{
2792 val = tswap64(val);
2793 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2794}
2795
13eb76e0
FB
2796#endif
2797
2798/* virtual memory access for debug */
b448f2f3
FB
2799int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2800 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2801{
2802 int l;
9b3c35e0
JM
2803 target_phys_addr_t phys_addr;
2804 target_ulong page;
13eb76e0
FB
2805
2806 while (len > 0) {
2807 page = addr & TARGET_PAGE_MASK;
2808 phys_addr = cpu_get_phys_page_debug(env, page);
2809 /* if no physical page is mapped, return an error */
2810 if (phys_addr == -1)
2811 return -1;
2812 l = (page + TARGET_PAGE_SIZE) - addr;
2813 if (l > len)
2814 l = len;
b448f2f3
FB
2815 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2816 buf, l, is_write);
13eb76e0
FB
2817 len -= l;
2818 buf += l;
2819 addr += l;
2820 }
2821 return 0;
2822}
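/* Minimal sketch (hypothetical debugger hook): reading guest virtual
   memory, e.g. on behalf of a gdb stub. */
#if 0
static int example_read_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif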
2823
e3db7226
FB
2824void dump_exec_info(FILE *f,
2825 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2826{
2827 int i, target_code_size, max_target_code_size;
2828 int direct_jmp_count, direct_jmp2_count, cross_page;
2829 TranslationBlock *tb;
2830
2831 target_code_size = 0;
2832 max_target_code_size = 0;
2833 cross_page = 0;
2834 direct_jmp_count = 0;
2835 direct_jmp2_count = 0;
2836 for(i = 0; i < nb_tbs; i++) {
2837 tb = &tbs[i];
2838 target_code_size += tb->size;
2839 if (tb->size > max_target_code_size)
2840 max_target_code_size = tb->size;
2841 if (tb->page_addr[1] != -1)
2842 cross_page++;
2843 if (tb->tb_next_offset[0] != 0xffff) {
2844 direct_jmp_count++;
2845 if (tb->tb_next_offset[1] != 0xffff) {
2846 direct_jmp2_count++;
2847 }
2848 }
2849 }
2850 /* XXX: avoid using doubles ? */
2851 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2852 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2853 nb_tbs ? target_code_size / nb_tbs : 0,
2854 max_target_code_size);
2855 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2856 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2857 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2858 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2859 cross_page,
2860 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2861 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2862 direct_jmp_count,
2863 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2864 direct_jmp2_count,
2865 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2866 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2867 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2868 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2869}
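/* Example output (hypothetical numbers; this function is typically
   reached through the monitor's 'info jit' command):

   TB count 12345
   TB avg target size 18 max=196 bytes
   TB avg host size 104 bytes (expansion ratio: 5.8)
   cross page TB count 23 (0%)
   direct jump count 9876 (80%) (2 jumps=4567 36%)
   TB flush count 3
   TB invalidate count 1024
   TLB flush count 42
*/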
2870
61382a50
FB
2871#if !defined(CONFIG_USER_ONLY)
2872
2873#define MMUSUFFIX _cmmu
2874#define GETPC() NULL
2875#define env cpu_single_env
b769d8fe 2876#define SOFTMMU_CODE_ACCESS
61382a50
FB
2877
2878#define SHIFT 0
2879#include "softmmu_template.h"
2880
2881#define SHIFT 1
2882#include "softmmu_template.h"
2883
2884#define SHIFT 2
2885#include "softmmu_template.h"
2886
2887#define SHIFT 3
2888#include "softmmu_template.h"
2889
2890#undef env
2891
2892#endif