[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c
FB
21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c
FB
25#include <sys/mman.h>
26#endif
54936004
FB
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181
FB
35#include "cpu.h"
36#include "exec-all.h"
53a5960a
PB
37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
67d3b957 44//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
45
46/* make various TB consistency checks */
47//#define DEBUG_TB_CHECK
98857888 48//#define DEBUG_TLB_CHECK
fd6ce8f6 49
1196be37 50//#define DEBUG_IOPORT
db7b5426 51//#define DEBUG_SUBPAGE
1196be37 52
99773bd4
PB
53#if !defined(CONFIG_USER_ONLY)
54/* TB consistency checks only implemented for usermode emulation. */
55#undef DEBUG_TB_CHECK
56#endif
57
fd6ce8f6
FB
58/* threshold to flush the translated code buffer */
59#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60
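/* i.e. keep at least CODE_GEN_MAX_SIZE bytes of headroom so that one more
   maximally-sized TB can always be generated before a flush is forced
   (see the check in tb_alloc()). */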
9fa3e853
FB
61#define SMC_BITMAP_USE_THRESHOLD 10
62
63#define MMAP_AREA_START 0x00000000
64#define MMAP_AREA_END 0xa8000000
fd6ce8f6 65
108c49b8
FB
66#if defined(TARGET_SPARC64)
67#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
68#elif defined(TARGET_SPARC)
69#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
70#elif defined(TARGET_ALPHA)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
73#elif defined(TARGET_PPC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#else
76/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
77#define TARGET_PHYS_ADDR_SPACE_BITS 32
78#endif
79
fd6ce8f6 80TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 81TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 82int nb_tbs;
eb51d102
FB
83/* any access to the tbs or the page table must use this lock */
84spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 85
b8076a74 86uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
87uint8_t *code_gen_ptr;
88
9fa3e853
FB
89int phys_ram_size;
90int phys_ram_fd;
91uint8_t *phys_ram_base;
1ccde1cb 92uint8_t *phys_ram_dirty;
e9a1ab19 93static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 94
6a00d601
FB
95CPUState *first_cpu;
96/* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98CPUState *cpu_single_env;
99
54936004 100typedef struct PageDesc {
92e873b9 101 /* list of TBs intersecting this ram page */
fd6ce8f6 102 TranslationBlock *first_tb;
9fa3e853
FB
103 /* in order to optimize self modifying code, we count the number
104 of lookups we do to a given page to use a bitmap */
105 unsigned int code_write_count;
106 uint8_t *code_bitmap;
107#if defined(CONFIG_USER_ONLY)
108 unsigned long flags;
109#endif
54936004
FB
110} PageDesc;
111
92e873b9
FB
112typedef struct PhysPageDesc {
113 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 114 uint32_t phys_offset;
92e873b9
FB
115} PhysPageDesc;
116
54936004 117#define L2_BITS 10
bedb69ea
JM
118#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
119/* XXX: this is a temporary hack for alpha target.
120 * In the future, this is to be replaced by a multi-level table
 121 * to actually be able to handle the complete 64-bit address space.
122 */
123#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124#else
54936004 125#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 126#endif
54936004
FB
127
128#define L1_SIZE (1 << L1_BITS)
129#define L2_SIZE (1 << L2_BITS)
130
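/* l1_map and l1_phys_map are two-level tables indexed by target page
   number: the upper L1_BITS select an L1 slot and the lower L2_BITS the
   entry inside an L2 block, roughly
   l1_map[index >> L2_BITS][index & (L2_SIZE - 1)].  L2 blocks are
   allocated lazily on first use (see page_find_alloc()), and l1_phys_map
   gains an extra level when TARGET_PHYS_ADDR_SPACE_BITS exceeds 32
   (see phys_page_find_alloc()). */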
33417e70 131static void io_mem_init(void);
fd6ce8f6 132
83fb7adf
FB
133unsigned long qemu_real_host_page_size;
134unsigned long qemu_host_page_bits;
135unsigned long qemu_host_page_size;
136unsigned long qemu_host_page_mask;
54936004 137
92e873b9 138/* XXX: for system emulation, it could just be an array */
54936004 139static PageDesc *l1_map[L1_SIZE];
0a962c02 140PhysPageDesc **l1_phys_map;
54936004 141
33417e70 142/* io memory support */
33417e70
FB
143CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
144CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 145void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 146static int io_mem_nb;
6658ffb8
PB
147#if defined(CONFIG_SOFTMMU)
148static int io_mem_watch;
149#endif
33417e70 150
34865134
FB
151/* log support */
152char *logfilename = "/tmp/qemu.log";
153FILE *logfile;
154int loglevel;
155
e3db7226
FB
156/* statistics */
157static int tlb_flush_count;
158static int tb_flush_count;
159static int tb_phys_invalidate_count;
160
db7b5426
BS
161#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
162typedef struct subpage_t {
163 target_phys_addr_t base;
164 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
165 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
166 void *opaque[TARGET_PAGE_SIZE];
167} subpage_t;
168
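/* A subpage_t splits a single target page into finer-grained I/O regions:
   SUBPAGE_IDX() yields the byte offset inside the page, which indexes the
   per-offset mem_read/mem_write/opaque dispatch tables above. */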
b346ff46 169static void page_init(void)
54936004 170{
83fb7adf 171 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 172 TARGET_PAGE_SIZE */
67b915a5 173#ifdef _WIN32
d5a8f07c
FB
174 {
175 SYSTEM_INFO system_info;
176 DWORD old_protect;
177
178 GetSystemInfo(&system_info);
179 qemu_real_host_page_size = system_info.dwPageSize;
180
181 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
182 PAGE_EXECUTE_READWRITE, &old_protect);
183 }
67b915a5 184#else
83fb7adf 185 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
186 {
187 unsigned long start, end;
188
189 start = (unsigned long)code_gen_buffer;
190 start &= ~(qemu_real_host_page_size - 1);
191
192 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
193 end += qemu_real_host_page_size - 1;
194 end &= ~(qemu_real_host_page_size - 1);
195
196 mprotect((void *)start, end - start,
197 PROT_READ | PROT_WRITE | PROT_EXEC);
198 }
67b915a5 199#endif
d5a8f07c 200
83fb7adf
FB
201 if (qemu_host_page_size == 0)
202 qemu_host_page_size = qemu_real_host_page_size;
203 if (qemu_host_page_size < TARGET_PAGE_SIZE)
204 qemu_host_page_size = TARGET_PAGE_SIZE;
205 qemu_host_page_bits = 0;
206 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
207 qemu_host_page_bits++;
208 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
209 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
210 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004
FB
211}
212
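/* The VirtualProtect()/mprotect() calls above make code_gen_buffer
   readable, writable and executable: the translator emits host machine
   code into it and the CPU loop then jumps into that code directly. */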
fd6ce8f6 213static inline PageDesc *page_find_alloc(unsigned int index)
54936004 214{
54936004
FB
215 PageDesc **lp, *p;
216
54936004
FB
217 lp = &l1_map[index >> L2_BITS];
218 p = *lp;
219 if (!p) {
220 /* allocate if not found */
59817ccb 221 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 222 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
223 *lp = p;
224 }
225 return p + (index & (L2_SIZE - 1));
226}
227
fd6ce8f6 228static inline PageDesc *page_find(unsigned int index)
54936004 229{
54936004
FB
230 PageDesc *p;
231
54936004
FB
232 p = l1_map[index >> L2_BITS];
233 if (!p)
234 return 0;
fd6ce8f6
FB
235 return p + (index & (L2_SIZE - 1));
236}
237
108c49b8 238static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 239{
108c49b8 240 void **lp, **p;
e3f4e2a4 241 PhysPageDesc *pd;
92e873b9 242
108c49b8
FB
243 p = (void **)l1_phys_map;
244#if TARGET_PHYS_ADDR_SPACE_BITS > 32
245
246#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
247#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
248#endif
249 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
250 p = *lp;
251 if (!p) {
252 /* allocate if not found */
108c49b8
FB
253 if (!alloc)
254 return NULL;
255 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
256 memset(p, 0, sizeof(void *) * L1_SIZE);
257 *lp = p;
258 }
259#endif
260 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
261 pd = *lp;
262 if (!pd) {
263 int i;
108c49b8
FB
264 /* allocate if not found */
265 if (!alloc)
266 return NULL;
e3f4e2a4
PB
267 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
268 *lp = pd;
269 for (i = 0; i < L2_SIZE; i++)
270 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 271 }
e3f4e2a4 272 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
273}
274
108c49b8 275static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 276{
108c49b8 277 return phys_page_find_alloc(index, 0);
92e873b9
FB
278}
279
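/* Note the split between the two page tables: l1_map/PageDesc tracks the
   per-page TB list, the SMC code bitmap and (for user mode) protection
   flags, while l1_phys_map/PhysPageDesc maps a guest physical page to its
   phys_offset (host RAM offset, or an io_index in the low bits). */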
9fa3e853 280#if !defined(CONFIG_USER_ONLY)
6a00d601 281static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
282static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
283 target_ulong vaddr);
9fa3e853 284#endif
fd6ce8f6 285
6a00d601 286void cpu_exec_init(CPUState *env)
fd6ce8f6 287{
6a00d601
FB
288 CPUState **penv;
289 int cpu_index;
290
fd6ce8f6
FB
291 if (!code_gen_ptr) {
292 code_gen_ptr = code_gen_buffer;
b346ff46 293 page_init();
33417e70 294 io_mem_init();
fd6ce8f6 295 }
6a00d601
FB
296 env->next_cpu = NULL;
297 penv = &first_cpu;
298 cpu_index = 0;
299 while (*penv != NULL) {
300 penv = (CPUState **)&(*penv)->next_cpu;
301 cpu_index++;
302 }
303 env->cpu_index = cpu_index;
6658ffb8 304 env->nb_watchpoints = 0;
6a00d601 305 *penv = env;
fd6ce8f6
FB
306}
307
9fa3e853
FB
308static inline void invalidate_page_bitmap(PageDesc *p)
309{
310 if (p->code_bitmap) {
59817ccb 311 qemu_free(p->code_bitmap);
9fa3e853
FB
312 p->code_bitmap = NULL;
313 }
314 p->code_write_count = 0;
315}
316
fd6ce8f6
FB
317/* set to NULL all the 'first_tb' fields in all PageDescs */
318static void page_flush_tb(void)
319{
320 int i, j;
321 PageDesc *p;
322
323 for(i = 0; i < L1_SIZE; i++) {
324 p = l1_map[i];
325 if (p) {
9fa3e853
FB
326 for(j = 0; j < L2_SIZE; j++) {
327 p->first_tb = NULL;
328 invalidate_page_bitmap(p);
329 p++;
330 }
fd6ce8f6
FB
331 }
332 }
333}
334
335/* flush all the translation blocks */
d4e8164f 336/* XXX: tb_flush is currently not thread safe */
6a00d601 337void tb_flush(CPUState *env1)
fd6ce8f6 338{
6a00d601 339 CPUState *env;
0124311e 340#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
341 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
342 code_gen_ptr - code_gen_buffer,
343 nb_tbs,
0124311e 344 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
345#endif
346 nb_tbs = 0;
6a00d601
FB
347
348 for(env = first_cpu; env != NULL; env = env->next_cpu) {
349 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
350 }
9fa3e853 351
8a8a608f 352 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 353 page_flush_tb();
9fa3e853 354
fd6ce8f6 355 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
356 /* XXX: flush processor icache at this point if cache flush is
357 expensive */
e3db7226 358 tb_flush_count++;
fd6ce8f6
FB
359}
360
361#ifdef DEBUG_TB_CHECK
362
bc98a7ef 363static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
364{
365 TranslationBlock *tb;
366 int i;
367 address &= TARGET_PAGE_MASK;
99773bd4
PB
368 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
369 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
370 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
371 address >= tb->pc + tb->size)) {
372 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 373 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
374 }
375 }
376 }
377}
378
379/* verify that all the pages have correct rights for code */
380static void tb_page_check(void)
381{
382 TranslationBlock *tb;
383 int i, flags1, flags2;
384
99773bd4
PB
385 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
386 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
387 flags1 = page_get_flags(tb->pc);
388 flags2 = page_get_flags(tb->pc + tb->size - 1);
389 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
390 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 391 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
392 }
393 }
394 }
395}
396
d4e8164f
FB
397void tb_jmp_check(TranslationBlock *tb)
398{
399 TranslationBlock *tb1;
400 unsigned int n1;
401
402 /* suppress any remaining jumps to this TB */
403 tb1 = tb->jmp_first;
404 for(;;) {
405 n1 = (long)tb1 & 3;
406 tb1 = (TranslationBlock *)((long)tb1 & ~3);
407 if (n1 == 2)
408 break;
409 tb1 = tb1->jmp_next[n1];
410 }
411 /* check end of list */
412 if (tb1 != tb) {
413 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
414 }
415}
416
fd6ce8f6
FB
417#endif
418
419/* invalidate one TB */
420static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
421 int next_offset)
422{
423 TranslationBlock *tb1;
424 for(;;) {
425 tb1 = *ptb;
426 if (tb1 == tb) {
427 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
428 break;
429 }
430 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
431 }
432}
433
9fa3e853
FB
434static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
435{
436 TranslationBlock *tb1;
437 unsigned int n1;
438
439 for(;;) {
440 tb1 = *ptb;
441 n1 = (long)tb1 & 3;
442 tb1 = (TranslationBlock *)((long)tb1 & ~3);
443 if (tb1 == tb) {
444 *ptb = tb1->page_next[n1];
445 break;
446 }
447 ptb = &tb1->page_next[n1];
448 }
449}
450
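/* In the TB lists walked here, the low 2 bits of each TranslationBlock
   pointer are a tag: 0 or 1 records which of the TB's two page links
   (page_next[n]) or jump slots (jmp_next[n]) the pointer came from, while
   2 marks the head of the circular jump list (tb->jmp_first is the TB
   itself tagged with 2, see tb_link_phys()). */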
d4e8164f
FB
451static inline void tb_jmp_remove(TranslationBlock *tb, int n)
452{
453 TranslationBlock *tb1, **ptb;
454 unsigned int n1;
455
456 ptb = &tb->jmp_next[n];
457 tb1 = *ptb;
458 if (tb1) {
459 /* find tb(n) in circular list */
460 for(;;) {
461 tb1 = *ptb;
462 n1 = (long)tb1 & 3;
463 tb1 = (TranslationBlock *)((long)tb1 & ~3);
464 if (n1 == n && tb1 == tb)
465 break;
466 if (n1 == 2) {
467 ptb = &tb1->jmp_first;
468 } else {
469 ptb = &tb1->jmp_next[n1];
470 }
471 }
472 /* now we can suppress tb(n) from the list */
473 *ptb = tb->jmp_next[n];
474
475 tb->jmp_next[n] = NULL;
476 }
477}
478
479/* reset the jump entry 'n' of a TB so that it is not chained to
480 another TB */
481static inline void tb_reset_jump(TranslationBlock *tb, int n)
482{
483 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
484}
485
8a40a180 486static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 487{
6a00d601 488 CPUState *env;
8a40a180 489 PageDesc *p;
d4e8164f 490 unsigned int h, n1;
8a40a180
FB
491 target_ulong phys_pc;
492 TranslationBlock *tb1, *tb2;
d4e8164f 493
8a40a180
FB
494 /* remove the TB from the hash list */
495 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
496 h = tb_phys_hash_func(phys_pc);
497 tb_remove(&tb_phys_hash[h], tb,
498 offsetof(TranslationBlock, phys_hash_next));
499
500 /* remove the TB from the page list */
501 if (tb->page_addr[0] != page_addr) {
502 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
503 tb_page_remove(&p->first_tb, tb);
504 invalidate_page_bitmap(p);
505 }
506 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
507 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
508 tb_page_remove(&p->first_tb, tb);
509 invalidate_page_bitmap(p);
510 }
511
36bdbe54 512 tb_invalidated_flag = 1;
59817ccb 513
fd6ce8f6 514 /* remove the TB from the hash list */
8a40a180 515 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
516 for(env = first_cpu; env != NULL; env = env->next_cpu) {
517 if (env->tb_jmp_cache[h] == tb)
518 env->tb_jmp_cache[h] = NULL;
519 }
d4e8164f
FB
520
521 /* suppress this TB from the two jump lists */
522 tb_jmp_remove(tb, 0);
523 tb_jmp_remove(tb, 1);
524
525 /* suppress any remaining jumps to this TB */
526 tb1 = tb->jmp_first;
527 for(;;) {
528 n1 = (long)tb1 & 3;
529 if (n1 == 2)
530 break;
531 tb1 = (TranslationBlock *)((long)tb1 & ~3);
532 tb2 = tb1->jmp_next[n1];
533 tb_reset_jump(tb1, n1);
534 tb1->jmp_next[n1] = NULL;
535 tb1 = tb2;
536 }
537 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 538
e3db7226 539 tb_phys_invalidate_count++;
9fa3e853
FB
540}
541
542static inline void set_bits(uint8_t *tab, int start, int len)
543{
544 int end, mask, end1;
545
546 end = start + len;
547 tab += start >> 3;
548 mask = 0xff << (start & 7);
549 if ((start & ~7) == (end & ~7)) {
550 if (start < end) {
551 mask &= ~(0xff << (end & 7));
552 *tab |= mask;
553 }
554 } else {
555 *tab++ |= mask;
556 start = (start + 8) & ~7;
557 end1 = end & ~7;
558 while (start < end1) {
559 *tab++ = 0xff;
560 start += 8;
561 }
562 if (start < end) {
563 mask = ~(0xff << (end & 7));
564 *tab |= mask;
565 }
566 }
567}
568
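/* set_bits() marks the bit range [start, start + len) in a bitmap that
   has one bit per byte of the page; build_page_bitmap() below uses it so
   that tb_invalidate_phys_page_fast() can tell with a single lookup
   whether a write of up to 8 bytes overlaps any translated code. */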
569static void build_page_bitmap(PageDesc *p)
570{
571 int n, tb_start, tb_end;
572 TranslationBlock *tb;
573
59817ccb 574 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
575 if (!p->code_bitmap)
576 return;
577 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
578
579 tb = p->first_tb;
580 while (tb != NULL) {
581 n = (long)tb & 3;
582 tb = (TranslationBlock *)((long)tb & ~3);
583 /* NOTE: this is subtle as a TB may span two physical pages */
584 if (n == 0) {
585 /* NOTE: tb_end may be after the end of the page, but
586 it is not a problem */
587 tb_start = tb->pc & ~TARGET_PAGE_MASK;
588 tb_end = tb_start + tb->size;
589 if (tb_end > TARGET_PAGE_SIZE)
590 tb_end = TARGET_PAGE_SIZE;
591 } else {
592 tb_start = 0;
593 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
594 }
595 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
596 tb = tb->page_next[n];
597 }
598}
599
d720b93d
FB
600#ifdef TARGET_HAS_PRECISE_SMC
601
602static void tb_gen_code(CPUState *env,
603 target_ulong pc, target_ulong cs_base, int flags,
604 int cflags)
605{
606 TranslationBlock *tb;
607 uint8_t *tc_ptr;
608 target_ulong phys_pc, phys_page2, virt_page2;
609 int code_gen_size;
610
c27004ec
FB
611 phys_pc = get_phys_addr_code(env, pc);
612 tb = tb_alloc(pc);
d720b93d
FB
613 if (!tb) {
614 /* flush must be done */
615 tb_flush(env);
616 /* cannot fail at this point */
c27004ec 617 tb = tb_alloc(pc);
d720b93d
FB
618 }
619 tc_ptr = code_gen_ptr;
620 tb->tc_ptr = tc_ptr;
621 tb->cs_base = cs_base;
622 tb->flags = flags;
623 tb->cflags = cflags;
624 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
625 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
626
627 /* check next page if needed */
c27004ec 628 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 629 phys_page2 = -1;
c27004ec 630 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
631 phys_page2 = get_phys_addr_code(env, virt_page2);
632 }
633 tb_link_phys(tb, phys_pc, phys_page2);
634}
635#endif
636
9fa3e853
FB
637/* invalidate all TBs which intersect with the target physical page
638 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
639 the same physical page. 'is_cpu_write_access' should be true if called
640 from a real cpu write access: the virtual CPU will exit the current
641 TB if code is modified inside this TB. */
642void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
643 int is_cpu_write_access)
644{
645 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 646 CPUState *env = cpu_single_env;
9fa3e853 647 PageDesc *p;
ea1c1802 648 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 649 target_ulong tb_start, tb_end;
d720b93d 650 target_ulong current_pc, current_cs_base;
9fa3e853
FB
651
652 p = page_find(start >> TARGET_PAGE_BITS);
653 if (!p)
654 return;
655 if (!p->code_bitmap &&
d720b93d
FB
656 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
657 is_cpu_write_access) {
9fa3e853
FB
658 /* build code bitmap */
659 build_page_bitmap(p);
660 }
661
662 /* we remove all the TBs in the range [start, end[ */
663 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
664 current_tb_not_found = is_cpu_write_access;
665 current_tb_modified = 0;
666 current_tb = NULL; /* avoid warning */
667 current_pc = 0; /* avoid warning */
668 current_cs_base = 0; /* avoid warning */
669 current_flags = 0; /* avoid warning */
9fa3e853
FB
670 tb = p->first_tb;
671 while (tb != NULL) {
672 n = (long)tb & 3;
673 tb = (TranslationBlock *)((long)tb & ~3);
674 tb_next = tb->page_next[n];
675 /* NOTE: this is subtle as a TB may span two physical pages */
676 if (n == 0) {
677 /* NOTE: tb_end may be after the end of the page, but
678 it is not a problem */
679 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
680 tb_end = tb_start + tb->size;
681 } else {
682 tb_start = tb->page_addr[1];
683 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
684 }
685 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
686#ifdef TARGET_HAS_PRECISE_SMC
687 if (current_tb_not_found) {
688 current_tb_not_found = 0;
689 current_tb = NULL;
690 if (env->mem_write_pc) {
691 /* now we have a real cpu fault */
692 current_tb = tb_find_pc(env->mem_write_pc);
693 }
694 }
695 if (current_tb == tb &&
696 !(current_tb->cflags & CF_SINGLE_INSN)) {
697 /* If we are modifying the current TB, we must stop
698 its execution. We could be more precise by checking
699 that the modification is after the current PC, but it
700 would require a specialized function to partially
701 restore the CPU state */
702
703 current_tb_modified = 1;
704 cpu_restore_state(current_tb, env,
705 env->mem_write_pc, NULL);
706#if defined(TARGET_I386)
707 current_flags = env->hflags;
708 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
709 current_cs_base = (target_ulong)env->segs[R_CS].base;
710 current_pc = current_cs_base + env->eip;
711#else
712#error unsupported CPU
713#endif
714 }
715#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
716 /* we need to do that to handle the case where a signal
717 occurs while doing tb_phys_invalidate() */
718 saved_tb = NULL;
719 if (env) {
720 saved_tb = env->current_tb;
721 env->current_tb = NULL;
722 }
9fa3e853 723 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
724 if (env) {
725 env->current_tb = saved_tb;
726 if (env->interrupt_request && env->current_tb)
727 cpu_interrupt(env, env->interrupt_request);
728 }
9fa3e853
FB
729 }
730 tb = tb_next;
731 }
732#if !defined(CONFIG_USER_ONLY)
733 /* if no code remaining, no need to continue to use slow writes */
734 if (!p->first_tb) {
735 invalidate_page_bitmap(p);
d720b93d
FB
736 if (is_cpu_write_access) {
737 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
738 }
739 }
740#endif
741#ifdef TARGET_HAS_PRECISE_SMC
742 if (current_tb_modified) {
743 /* we generate a block containing just the instruction
744 modifying the memory. It will ensure that it cannot modify
745 itself */
ea1c1802 746 env->current_tb = NULL;
d720b93d
FB
747 tb_gen_code(env, current_pc, current_cs_base, current_flags,
748 CF_SINGLE_INSN);
749 cpu_resume_from_signal(env, NULL);
9fa3e853 750 }
fd6ce8f6 751#endif
9fa3e853 752}
fd6ce8f6 753
9fa3e853 754/* len must be <= 8 and start must be a multiple of len */
d720b93d 755static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
756{
757 PageDesc *p;
758 int offset, b;
59817ccb 759#if 0
a4193c8a
FB
760 if (1) {
761 if (loglevel) {
762 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
763 cpu_single_env->mem_write_vaddr, len,
764 cpu_single_env->eip,
765 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
766 }
59817ccb
FB
767 }
768#endif
9fa3e853
FB
769 p = page_find(start >> TARGET_PAGE_BITS);
770 if (!p)
771 return;
772 if (p->code_bitmap) {
773 offset = start & ~TARGET_PAGE_MASK;
774 b = p->code_bitmap[offset >> 3] >> (offset & 7);
775 if (b & ((1 << len) - 1))
776 goto do_invalidate;
777 } else {
778 do_invalidate:
d720b93d 779 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
780 }
781}
782
9fa3e853 783#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
784static void tb_invalidate_phys_page(target_ulong addr,
785 unsigned long pc, void *puc)
9fa3e853 786{
d720b93d
FB
787 int n, current_flags, current_tb_modified;
788 target_ulong current_pc, current_cs_base;
9fa3e853 789 PageDesc *p;
d720b93d
FB
790 TranslationBlock *tb, *current_tb;
791#ifdef TARGET_HAS_PRECISE_SMC
792 CPUState *env = cpu_single_env;
793#endif
9fa3e853
FB
794
795 addr &= TARGET_PAGE_MASK;
796 p = page_find(addr >> TARGET_PAGE_BITS);
797 if (!p)
798 return;
799 tb = p->first_tb;
d720b93d
FB
800 current_tb_modified = 0;
801 current_tb = NULL;
802 current_pc = 0; /* avoid warning */
803 current_cs_base = 0; /* avoid warning */
804 current_flags = 0; /* avoid warning */
805#ifdef TARGET_HAS_PRECISE_SMC
806 if (tb && pc != 0) {
807 current_tb = tb_find_pc(pc);
808 }
809#endif
9fa3e853
FB
810 while (tb != NULL) {
811 n = (long)tb & 3;
812 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
813#ifdef TARGET_HAS_PRECISE_SMC
814 if (current_tb == tb &&
815 !(current_tb->cflags & CF_SINGLE_INSN)) {
816 /* If we are modifying the current TB, we must stop
817 its execution. We could be more precise by checking
818 that the modification is after the current PC, but it
819 would require a specialized function to partially
820 restore the CPU state */
821
822 current_tb_modified = 1;
823 cpu_restore_state(current_tb, env, pc, puc);
824#if defined(TARGET_I386)
825 current_flags = env->hflags;
826 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
827 current_cs_base = (target_ulong)env->segs[R_CS].base;
828 current_pc = current_cs_base + env->eip;
829#else
830#error unsupported CPU
831#endif
832 }
833#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
834 tb_phys_invalidate(tb, addr);
835 tb = tb->page_next[n];
836 }
fd6ce8f6 837 p->first_tb = NULL;
d720b93d
FB
838#ifdef TARGET_HAS_PRECISE_SMC
839 if (current_tb_modified) {
840 /* we generate a block containing just the instruction
841 modifying the memory. It will ensure that it cannot modify
842 itself */
ea1c1802 843 env->current_tb = NULL;
d720b93d
FB
844 tb_gen_code(env, current_pc, current_cs_base, current_flags,
845 CF_SINGLE_INSN);
846 cpu_resume_from_signal(env, puc);
847 }
848#endif
fd6ce8f6 849}
9fa3e853 850#endif
fd6ce8f6
FB
851
852/* add the tb in the target page and protect it if necessary */
9fa3e853 853static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 854 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
855{
856 PageDesc *p;
9fa3e853
FB
857 TranslationBlock *last_first_tb;
858
859 tb->page_addr[n] = page_addr;
3a7d929e 860 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
861 tb->page_next[n] = p->first_tb;
862 last_first_tb = p->first_tb;
863 p->first_tb = (TranslationBlock *)((long)tb | n);
864 invalidate_page_bitmap(p);
fd6ce8f6 865
107db443 866#if defined(TARGET_HAS_SMC) || 1
d720b93d 867
9fa3e853 868#if defined(CONFIG_USER_ONLY)
fd6ce8f6 869 if (p->flags & PAGE_WRITE) {
53a5960a
PB
870 target_ulong addr;
871 PageDesc *p2;
9fa3e853
FB
872 int prot;
873
fd6ce8f6
FB
 874 /* mark the host page as non-writable (writes will then incur a
 875 page fault + mprotect overhead) */
53a5960a 876 page_addr &= qemu_host_page_mask;
fd6ce8f6 877 prot = 0;
53a5960a
PB
878 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
879 addr += TARGET_PAGE_SIZE) {
880
881 p2 = page_find (addr >> TARGET_PAGE_BITS);
882 if (!p2)
883 continue;
884 prot |= p2->flags;
885 p2->flags &= ~PAGE_WRITE;
886 page_get_flags(addr);
887 }
888 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
889 (prot & PAGE_BITS) & ~PAGE_WRITE);
890#ifdef DEBUG_TB_INVALIDATE
891 printf("protecting code page: 0x%08lx\n",
53a5960a 892 page_addr);
fd6ce8f6 893#endif
fd6ce8f6 894 }
9fa3e853
FB
895#else
896 /* if some code is already present, then the pages are already
897 protected. So we handle the case where only the first TB is
898 allocated in a physical page */
899 if (!last_first_tb) {
6a00d601 900 tlb_protect_code(page_addr);
9fa3e853
FB
901 }
902#endif
d720b93d
FB
903
904#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
905}
906
907/* Allocate a new translation block. Flush the translation buffer if
908 too many translation blocks or too much generated code. */
c27004ec 909TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
910{
911 TranslationBlock *tb;
fd6ce8f6
FB
912
913 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
914 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 915 return NULL;
fd6ce8f6
FB
916 tb = &tbs[nb_tbs++];
917 tb->pc = pc;
b448f2f3 918 tb->cflags = 0;
d4e8164f
FB
919 return tb;
920}
921
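/* When either tbs[] or the code buffer space is exhausted, tb_alloc()
   returns NULL; callers such as tb_gen_code() react by doing a full
   tb_flush() and retrying, which is why the retry cannot fail. */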
9fa3e853
FB
922/* add a new TB and link it to the physical page tables. phys_page2 is
923 (-1) to indicate that only one page contains the TB. */
924void tb_link_phys(TranslationBlock *tb,
925 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 926{
9fa3e853
FB
927 unsigned int h;
928 TranslationBlock **ptb;
929
930 /* add in the physical hash table */
931 h = tb_phys_hash_func(phys_pc);
932 ptb = &tb_phys_hash[h];
933 tb->phys_hash_next = *ptb;
934 *ptb = tb;
fd6ce8f6
FB
935
936 /* add in the page list */
9fa3e853
FB
937 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
938 if (phys_page2 != -1)
939 tb_alloc_page(tb, 1, phys_page2);
940 else
941 tb->page_addr[1] = -1;
9fa3e853 942
d4e8164f
FB
943 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
944 tb->jmp_next[0] = NULL;
945 tb->jmp_next[1] = NULL;
b448f2f3
FB
946#ifdef USE_CODE_COPY
947 tb->cflags &= ~CF_FP_USED;
948 if (tb->cflags & CF_TB_FP_USED)
949 tb->cflags |= CF_FP_USED;
950#endif
d4e8164f
FB
951
952 /* init original jump addresses */
953 if (tb->tb_next_offset[0] != 0xffff)
954 tb_reset_jump(tb, 0);
955 if (tb->tb_next_offset[1] != 0xffff)
956 tb_reset_jump(tb, 1);
8a40a180
FB
957
958#ifdef DEBUG_TB_CHECK
959 tb_page_check();
960#endif
fd6ce8f6
FB
961}
962
9fa3e853
FB
963/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
964 tb[1].tc_ptr. Return NULL if not found */
965TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 966{
9fa3e853
FB
967 int m_min, m_max, m;
968 unsigned long v;
969 TranslationBlock *tb;
a513fe19
FB
970
971 if (nb_tbs <= 0)
972 return NULL;
973 if (tc_ptr < (unsigned long)code_gen_buffer ||
974 tc_ptr >= (unsigned long)code_gen_ptr)
975 return NULL;
976 /* binary search (cf Knuth) */
977 m_min = 0;
978 m_max = nb_tbs - 1;
979 while (m_min <= m_max) {
980 m = (m_min + m_max) >> 1;
981 tb = &tbs[m];
982 v = (unsigned long)tb->tc_ptr;
983 if (v == tc_ptr)
984 return tb;
985 else if (tc_ptr < v) {
986 m_max = m - 1;
987 } else {
988 m_min = m + 1;
989 }
990 }
991 return &tbs[m_max];
992}
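/* The binary search above relies on tbs[] being filled in allocation
   order while code_gen_ptr only grows between flushes, so tc_ptr is
   monotonically increasing with the TB index. */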
7501267e 993
ea041c0e
FB
994static void tb_reset_jump_recursive(TranslationBlock *tb);
995
996static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
997{
998 TranslationBlock *tb1, *tb_next, **ptb;
999 unsigned int n1;
1000
1001 tb1 = tb->jmp_next[n];
1002 if (tb1 != NULL) {
1003 /* find head of list */
1004 for(;;) {
1005 n1 = (long)tb1 & 3;
1006 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1007 if (n1 == 2)
1008 break;
1009 tb1 = tb1->jmp_next[n1];
1010 }
 1011 /* we are now sure that tb jumps to tb1 */
1012 tb_next = tb1;
1013
1014 /* remove tb from the jmp_first list */
1015 ptb = &tb_next->jmp_first;
1016 for(;;) {
1017 tb1 = *ptb;
1018 n1 = (long)tb1 & 3;
1019 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1020 if (n1 == n && tb1 == tb)
1021 break;
1022 ptb = &tb1->jmp_next[n1];
1023 }
1024 *ptb = tb->jmp_next[n];
1025 tb->jmp_next[n] = NULL;
1026
1027 /* suppress the jump to next tb in generated code */
1028 tb_reset_jump(tb, n);
1029
0124311e 1030 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1031 tb_reset_jump_recursive(tb_next);
1032 }
1033}
1034
1035static void tb_reset_jump_recursive(TranslationBlock *tb)
1036{
1037 tb_reset_jump_recursive2(tb, 0);
1038 tb_reset_jump_recursive2(tb, 1);
1039}
1040
1fddef4b 1041#if defined(TARGET_HAS_ICE)
d720b93d
FB
1042static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1043{
9b3c35e0
JM
1044 target_phys_addr_t addr;
1045 target_ulong pd;
c2f07f81
PB
1046 ram_addr_t ram_addr;
1047 PhysPageDesc *p;
d720b93d 1048
c2f07f81
PB
1049 addr = cpu_get_phys_page_debug(env, pc);
1050 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1051 if (!p) {
1052 pd = IO_MEM_UNASSIGNED;
1053 } else {
1054 pd = p->phys_offset;
1055 }
1056 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1057 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1058}
c27004ec 1059#endif
d720b93d 1060
6658ffb8
PB
1061/* Add a watchpoint. */
1062int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1063{
1064 int i;
1065
1066 for (i = 0; i < env->nb_watchpoints; i++) {
1067 if (addr == env->watchpoint[i].vaddr)
1068 return 0;
1069 }
1070 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1071 return -1;
1072
1073 i = env->nb_watchpoints++;
1074 env->watchpoint[i].vaddr = addr;
1075 tlb_flush_page(env, addr);
1076 /* FIXME: This flush is needed because of the hack to make memory ops
1077 terminate the TB. It can be removed once the proper IO trap and
1078 re-execute bits are in. */
1079 tb_flush(env);
1080 return i;
1081}
1082
1083/* Remove a watchpoint. */
1084int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1085{
1086 int i;
1087
1088 for (i = 0; i < env->nb_watchpoints; i++) {
1089 if (addr == env->watchpoint[i].vaddr) {
1090 env->nb_watchpoints--;
1091 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1092 tlb_flush_page(env, addr);
1093 return 0;
1094 }
1095 }
1096 return -1;
1097}
1098
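/* Watchpoints work by routing the page's TLB entries through the
   io_mem_watch handlers (see tlb_set_page_exec() below), so every access
   to a watched page takes the slow path and can then be checked against
   env->watchpoint[]. */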
c33a346e
FB
1099/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1100 breakpoint is reached */
2e12669a 1101int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1102{
1fddef4b 1103#if defined(TARGET_HAS_ICE)
4c3a88a2 1104 int i;
d720b93d 1105
4c3a88a2
FB
1106 for(i = 0; i < env->nb_breakpoints; i++) {
1107 if (env->breakpoints[i] == pc)
1108 return 0;
1109 }
1110
1111 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1112 return -1;
1113 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1114
1115 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1116 return 0;
1117#else
1118 return -1;
1119#endif
1120}
1121
1122/* remove a breakpoint */
2e12669a 1123int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1124{
1fddef4b 1125#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1126 int i;
1127 for(i = 0; i < env->nb_breakpoints; i++) {
1128 if (env->breakpoints[i] == pc)
1129 goto found;
1130 }
1131 return -1;
1132 found:
4c3a88a2 1133 env->nb_breakpoints--;
1fddef4b
FB
1134 if (i < env->nb_breakpoints)
1135 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1136
1137 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1138 return 0;
1139#else
1140 return -1;
1141#endif
1142}
1143
c33a346e
FB
1144/* enable or disable single step mode. EXCP_DEBUG is returned by the
1145 CPU loop after each instruction */
1146void cpu_single_step(CPUState *env, int enabled)
1147{
1fddef4b 1148#if defined(TARGET_HAS_ICE)
c33a346e
FB
1149 if (env->singlestep_enabled != enabled) {
1150 env->singlestep_enabled = enabled;
 1151 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1152 /* XXX: only flush what is necessary */
0124311e 1153 tb_flush(env);
c33a346e
FB
1154 }
1155#endif
1156}
1157
34865134
FB
 1158/* enable or disable low level logging */
1159void cpu_set_log(int log_flags)
1160{
1161 loglevel = log_flags;
1162 if (loglevel && !logfile) {
1163 logfile = fopen(logfilename, "w");
1164 if (!logfile) {
1165 perror(logfilename);
1166 _exit(1);
1167 }
9fa3e853
FB
1168#if !defined(CONFIG_SOFTMMU)
1169 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1170 {
1171 static uint8_t logfile_buf[4096];
1172 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1173 }
1174#else
34865134 1175 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1176#endif
34865134
FB
1177 }
1178}
1179
1180void cpu_set_log_filename(const char *filename)
1181{
1182 logfilename = strdup(filename);
1183}
c33a346e 1184
0124311e 1185/* mask must never be zero, except for A20 change call */
68a79315 1186void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1187{
1188 TranslationBlock *tb;
ee8b7021 1189 static int interrupt_lock;
59817ccb 1190
68a79315 1191 env->interrupt_request |= mask;
ea041c0e
FB
1192 /* if the cpu is currently executing code, we must unlink it and
 1193 all the potentially executing TBs */
1194 tb = env->current_tb;
ee8b7021
FB
1195 if (tb && !testandset(&interrupt_lock)) {
1196 env->current_tb = NULL;
ea041c0e 1197 tb_reset_jump_recursive(tb);
ee8b7021 1198 interrupt_lock = 0;
ea041c0e
FB
1199 }
1200}
1201
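/* Breaking the jump chains of the current TB (and of everything it links
   to) makes execution fall back to the main loop at the next block
   boundary, where interrupt_request is examined. */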
b54ad049
FB
1202void cpu_reset_interrupt(CPUState *env, int mask)
1203{
1204 env->interrupt_request &= ~mask;
1205}
1206
f193c797
FB
1207CPULogItem cpu_log_items[] = {
1208 { CPU_LOG_TB_OUT_ASM, "out_asm",
1209 "show generated host assembly code for each compiled TB" },
1210 { CPU_LOG_TB_IN_ASM, "in_asm",
1211 "show target assembly code for each compiled TB" },
1212 { CPU_LOG_TB_OP, "op",
1213 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1214#ifdef TARGET_I386
1215 { CPU_LOG_TB_OP_OPT, "op_opt",
1216 "show micro ops after optimization for each compiled TB" },
1217#endif
1218 { CPU_LOG_INT, "int",
1219 "show interrupts/exceptions in short format" },
1220 { CPU_LOG_EXEC, "exec",
1221 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1222 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1223 "show CPU state before block translation" },
f193c797
FB
1224#ifdef TARGET_I386
1225 { CPU_LOG_PCALL, "pcall",
1226 "show protected mode far calls/returns/exceptions" },
1227#endif
8e3a9fd2 1228#ifdef DEBUG_IOPORT
fd872598
FB
1229 { CPU_LOG_IOPORT, "ioport",
1230 "show all i/o ports accesses" },
8e3a9fd2 1231#endif
f193c797
FB
1232 { 0, NULL, NULL },
1233};
1234
1235static int cmp1(const char *s1, int n, const char *s2)
1236{
1237 if (strlen(s2) != n)
1238 return 0;
1239 return memcmp(s1, s2, n) == 0;
1240}
1241
 1242/* takes a comma-separated list of log masks. Returns 0 on error. */
1243int cpu_str_to_log_mask(const char *str)
1244{
1245 CPULogItem *item;
1246 int mask;
1247 const char *p, *p1;
1248
1249 p = str;
1250 mask = 0;
1251 for(;;) {
1252 p1 = strchr(p, ',');
1253 if (!p1)
1254 p1 = p + strlen(p);
8e3a9fd2
FB
1255 if(cmp1(p,p1-p,"all")) {
1256 for(item = cpu_log_items; item->mask != 0; item++) {
1257 mask |= item->mask;
1258 }
1259 } else {
f193c797
FB
1260 for(item = cpu_log_items; item->mask != 0; item++) {
1261 if (cmp1(p, p1 - p, item->name))
1262 goto found;
1263 }
1264 return 0;
8e3a9fd2 1265 }
f193c797
FB
1266 found:
1267 mask |= item->mask;
1268 if (*p1 != ',')
1269 break;
1270 p = p1 + 1;
1271 }
1272 return mask;
1273}
ea041c0e 1274
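/* e.g. cpu_str_to_log_mask("in_asm,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT, "all" selects every entry of
   cpu_log_items, and an unknown name makes the whole call return 0. */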
7501267e
FB
1275void cpu_abort(CPUState *env, const char *fmt, ...)
1276{
1277 va_list ap;
1278
1279 va_start(ap, fmt);
1280 fprintf(stderr, "qemu: fatal: ");
1281 vfprintf(stderr, fmt, ap);
1282 fprintf(stderr, "\n");
1283#ifdef TARGET_I386
7fe48483
FB
1284 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1285#else
1286 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1287#endif
1288 va_end(ap);
924edcae
AZ
1289 if (logfile) {
1290 fflush(logfile);
1291 fclose(logfile);
1292 }
7501267e
FB
1293 abort();
1294}
1295
c5be9f08
TS
1296CPUState *cpu_copy(CPUState *env)
1297{
1298 CPUState *new_env = cpu_init();
1299 /* preserve chaining and index */
1300 CPUState *next_cpu = new_env->next_cpu;
1301 int cpu_index = new_env->cpu_index;
1302 memcpy(new_env, env, sizeof(CPUState));
1303 new_env->next_cpu = next_cpu;
1304 new_env->cpu_index = cpu_index;
1305 return new_env;
1306}
1307
0124311e
FB
1308#if !defined(CONFIG_USER_ONLY)
1309
ee8b7021
FB
1310/* NOTE: if flush_global is true, also flush global entries (not
1311 implemented yet) */
1312void tlb_flush(CPUState *env, int flush_global)
33417e70 1313{
33417e70 1314 int i;
0124311e 1315
9fa3e853
FB
1316#if defined(DEBUG_TLB)
1317 printf("tlb_flush:\n");
1318#endif
0124311e
FB
1319 /* must reset current TB so that interrupts cannot modify the
1320 links while we are modifying them */
1321 env->current_tb = NULL;
1322
33417e70 1323 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1324 env->tlb_table[0][i].addr_read = -1;
1325 env->tlb_table[0][i].addr_write = -1;
1326 env->tlb_table[0][i].addr_code = -1;
1327 env->tlb_table[1][i].addr_read = -1;
1328 env->tlb_table[1][i].addr_write = -1;
1329 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1330#if (NB_MMU_MODES >= 3)
1331 env->tlb_table[2][i].addr_read = -1;
1332 env->tlb_table[2][i].addr_write = -1;
1333 env->tlb_table[2][i].addr_code = -1;
1334#if (NB_MMU_MODES == 4)
1335 env->tlb_table[3][i].addr_read = -1;
1336 env->tlb_table[3][i].addr_write = -1;
1337 env->tlb_table[3][i].addr_code = -1;
1338#endif
1339#endif
33417e70 1340 }
9fa3e853 1341
8a40a180 1342 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1343
1344#if !defined(CONFIG_SOFTMMU)
1345 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1346#endif
1347#ifdef USE_KQEMU
1348 if (env->kqemu_enabled) {
1349 kqemu_flush(env, flush_global);
1350 }
9fa3e853 1351#endif
e3db7226 1352 tlb_flush_count++;
33417e70
FB
1353}
1354
274da6b2 1355static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1356{
84b7b8e7
FB
1357 if (addr == (tlb_entry->addr_read &
1358 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1359 addr == (tlb_entry->addr_write &
1360 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1361 addr == (tlb_entry->addr_code &
1362 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1363 tlb_entry->addr_read = -1;
1364 tlb_entry->addr_write = -1;
1365 tlb_entry->addr_code = -1;
1366 }
61382a50
FB
1367}
1368
2e12669a 1369void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1370{
8a40a180 1371 int i;
9fa3e853 1372 TranslationBlock *tb;
0124311e 1373
9fa3e853 1374#if defined(DEBUG_TLB)
108c49b8 1375 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1376#endif
0124311e
FB
1377 /* must reset current TB so that interrupts cannot modify the
1378 links while we are modifying them */
1379 env->current_tb = NULL;
61382a50
FB
1380
1381 addr &= TARGET_PAGE_MASK;
1382 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1383 tlb_flush_entry(&env->tlb_table[0][i], addr);
1384 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1385#if (NB_MMU_MODES >= 3)
1386 tlb_flush_entry(&env->tlb_table[2][i], addr);
1387#if (NB_MMU_MODES == 4)
1388 tlb_flush_entry(&env->tlb_table[3][i], addr);
1389#endif
1390#endif
0124311e 1391
b362e5e0
PB
1392 /* Discard jump cache entries for any tb which might potentially
1393 overlap the flushed page. */
1394 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1395 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1396
1397 i = tb_jmp_cache_hash_page(addr);
1398 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1399
0124311e 1400#if !defined(CONFIG_SOFTMMU)
9fa3e853 1401 if (addr < MMAP_AREA_END)
0124311e 1402 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1403#endif
0a962c02
FB
1404#ifdef USE_KQEMU
1405 if (env->kqemu_enabled) {
1406 kqemu_flush_page(env, addr);
1407 }
1408#endif
9fa3e853
FB
1409}
1410
9fa3e853
FB
1411/* update the TLBs so that writes to code in the virtual page 'addr'
1412 can be detected */
6a00d601 1413static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1414{
6a00d601
FB
1415 cpu_physical_memory_reset_dirty(ram_addr,
1416 ram_addr + TARGET_PAGE_SIZE,
1417 CODE_DIRTY_FLAG);
9fa3e853
FB
1418}
1419
9fa3e853 1420/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1421 tested for self modifying code */
1422static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1423 target_ulong vaddr)
9fa3e853 1424{
3a7d929e 1425 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1426}
1427
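/* phys_ram_dirty keeps one byte of dirty flags per target page.  Clearing
   CODE_DIRTY_FLAG (tlb_protect_code) downgrades the page's TLB write
   entries to IO_MEM_NOTDIRTY, so stores take the slow path where
   self-modifying code can be detected; once the page no longer holds any
   translated code, tlb_unprotect_code_phys() sets the flag again. */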
1428static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1429 unsigned long start, unsigned long length)
1430{
1431 unsigned long addr;
84b7b8e7
FB
1432 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1433 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1434 if ((addr - start) < length) {
84b7b8e7 1435 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1436 }
1437 }
1438}
1439
3a7d929e 1440void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1441 int dirty_flags)
1ccde1cb
FB
1442{
1443 CPUState *env;
4f2ac237 1444 unsigned long length, start1;
0a962c02
FB
1445 int i, mask, len;
1446 uint8_t *p;
1ccde1cb
FB
1447
1448 start &= TARGET_PAGE_MASK;
1449 end = TARGET_PAGE_ALIGN(end);
1450
1451 length = end - start;
1452 if (length == 0)
1453 return;
0a962c02 1454 len = length >> TARGET_PAGE_BITS;
3a7d929e 1455#ifdef USE_KQEMU
6a00d601
FB
1456 /* XXX: should not depend on cpu context */
1457 env = first_cpu;
3a7d929e 1458 if (env->kqemu_enabled) {
f23db169
FB
1459 ram_addr_t addr;
1460 addr = start;
1461 for(i = 0; i < len; i++) {
1462 kqemu_set_notdirty(env, addr);
1463 addr += TARGET_PAGE_SIZE;
1464 }
3a7d929e
FB
1465 }
1466#endif
f23db169
FB
1467 mask = ~dirty_flags;
1468 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1469 for(i = 0; i < len; i++)
1470 p[i] &= mask;
1471
1ccde1cb
FB
1472 /* we modify the TLB cache so that the dirty bit will be set again
1473 when accessing the range */
59817ccb 1474 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1475 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1476 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1477 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1478 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1479 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1480#if (NB_MMU_MODES >= 3)
1481 for(i = 0; i < CPU_TLB_SIZE; i++)
1482 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1483#if (NB_MMU_MODES == 4)
1484 for(i = 0; i < CPU_TLB_SIZE; i++)
1485 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1486#endif
1487#endif
6a00d601 1488 }
59817ccb
FB
1489
1490#if !defined(CONFIG_SOFTMMU)
1491 /* XXX: this is expensive */
1492 {
1493 VirtPageDesc *p;
1494 int j;
1495 target_ulong addr;
1496
1497 for(i = 0; i < L1_SIZE; i++) {
1498 p = l1_virt_map[i];
1499 if (p) {
1500 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1501 for(j = 0; j < L2_SIZE; j++) {
1502 if (p->valid_tag == virt_valid_tag &&
1503 p->phys_addr >= start && p->phys_addr < end &&
1504 (p->prot & PROT_WRITE)) {
1505 if (addr < MMAP_AREA_END) {
1506 mprotect((void *)addr, TARGET_PAGE_SIZE,
1507 p->prot & ~PROT_WRITE);
1508 }
1509 }
1510 addr += TARGET_PAGE_SIZE;
1511 p++;
1512 }
1513 }
1514 }
1515 }
1516#endif
1ccde1cb
FB
1517}
1518
3a7d929e
FB
1519static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1520{
1521 ram_addr_t ram_addr;
1522
84b7b8e7
FB
1523 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1524 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1525 tlb_entry->addend - (unsigned long)phys_ram_base;
1526 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1527 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1528 }
1529 }
1530}
1531
1532/* update the TLB according to the current state of the dirty bits */
1533void cpu_tlb_update_dirty(CPUState *env)
1534{
1535 int i;
1536 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1537 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1538 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1539 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1540#if (NB_MMU_MODES >= 3)
1541 for(i = 0; i < CPU_TLB_SIZE; i++)
1542 tlb_update_dirty(&env->tlb_table[2][i]);
1543#if (NB_MMU_MODES == 4)
1544 for(i = 0; i < CPU_TLB_SIZE; i++)
1545 tlb_update_dirty(&env->tlb_table[3][i]);
1546#endif
1547#endif
3a7d929e
FB
1548}
1549
1ccde1cb 1550static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1551 unsigned long start)
1ccde1cb
FB
1552{
1553 unsigned long addr;
84b7b8e7
FB
1554 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1555 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1556 if (addr == start) {
84b7b8e7 1557 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1558 }
1559 }
1560}
1561
1562/* update the TLB corresponding to virtual page vaddr and phys addr
1563 addr so that it is no longer dirty */
6a00d601
FB
1564static inline void tlb_set_dirty(CPUState *env,
1565 unsigned long addr, target_ulong vaddr)
1ccde1cb 1566{
1ccde1cb
FB
1567 int i;
1568
1ccde1cb
FB
1569 addr &= TARGET_PAGE_MASK;
1570 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1571 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1572 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1573#if (NB_MMU_MODES >= 3)
1574 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1575#if (NB_MMU_MODES == 4)
1576 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1577#endif
1578#endif
9fa3e853
FB
1579}
1580
59817ccb
FB
1581/* add a new TLB entry. At most one entry for a given virtual address
1582 is permitted. Return 0 if OK or 2 if the page could not be mapped
1583 (can only happen in non SOFTMMU mode for I/O pages or pages
1584 conflicting with the host address space). */
84b7b8e7
FB
1585int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1586 target_phys_addr_t paddr, int prot,
1587 int is_user, int is_softmmu)
9fa3e853 1588{
92e873b9 1589 PhysPageDesc *p;
4f2ac237 1590 unsigned long pd;
9fa3e853 1591 unsigned int index;
4f2ac237 1592 target_ulong address;
108c49b8 1593 target_phys_addr_t addend;
9fa3e853 1594 int ret;
84b7b8e7 1595 CPUTLBEntry *te;
6658ffb8 1596 int i;
9fa3e853 1597
92e873b9 1598 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1599 if (!p) {
1600 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1601 } else {
1602 pd = p->phys_offset;
9fa3e853
FB
1603 }
1604#if defined(DEBUG_TLB)
3a7d929e 1605 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1606 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1607#endif
1608
1609 ret = 0;
1610#if !defined(CONFIG_SOFTMMU)
1611 if (is_softmmu)
1612#endif
1613 {
2a4188a3 1614 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1615 /* IO memory case */
1616 address = vaddr | pd;
1617 addend = paddr;
1618 } else {
1619 /* standard memory */
1620 address = vaddr;
1621 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1622 }
6658ffb8
PB
1623
1624 /* Make accesses to pages with watchpoints go via the
1625 watchpoint trap routines. */
1626 for (i = 0; i < env->nb_watchpoints; i++) {
1627 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1628 if (address & ~TARGET_PAGE_MASK) {
1629 env->watchpoint[i].is_ram = 0;
1630 address = vaddr | io_mem_watch;
1631 } else {
1632 env->watchpoint[i].is_ram = 1;
1633 /* TODO: Figure out how to make read watchpoints coexist
1634 with code. */
1635 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1636 }
1637 }
1638 }
9fa3e853 1639
90f18422 1640 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1641 addend -= vaddr;
84b7b8e7
FB
1642 te = &env->tlb_table[is_user][index];
1643 te->addend = addend;
67b915a5 1644 if (prot & PAGE_READ) {
84b7b8e7
FB
1645 te->addr_read = address;
1646 } else {
1647 te->addr_read = -1;
1648 }
1649 if (prot & PAGE_EXEC) {
1650 te->addr_code = address;
9fa3e853 1651 } else {
84b7b8e7 1652 te->addr_code = -1;
9fa3e853 1653 }
67b915a5 1654 if (prot & PAGE_WRITE) {
856074ec
FB
1655 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1656 (pd & IO_MEM_ROMD)) {
1657 /* write access calls the I/O callback */
1658 te->addr_write = vaddr |
1659 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1660 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1661 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1662 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1663 } else {
84b7b8e7 1664 te->addr_write = address;
9fa3e853
FB
1665 }
1666 } else {
84b7b8e7 1667 te->addr_write = -1;
9fa3e853
FB
1668 }
1669 }
1670#if !defined(CONFIG_SOFTMMU)
1671 else {
1672 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1673 /* IO access: no mapping is done as it will be handled by the
1674 soft MMU */
1675 if (!(env->hflags & HF_SOFTMMU_MASK))
1676 ret = 2;
1677 } else {
1678 void *map_addr;
59817ccb
FB
1679
1680 if (vaddr >= MMAP_AREA_END) {
1681 ret = 2;
1682 } else {
1683 if (prot & PROT_WRITE) {
1684 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1685#if defined(TARGET_HAS_SMC) || 1
59817ccb 1686 first_tb ||
d720b93d 1687#endif
59817ccb
FB
1688 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1689 !cpu_physical_memory_is_dirty(pd))) {
 1690 /* ROM: we behave as if code were inside */
1691 /* if code is present, we only map as read only and save the
1692 original mapping */
1693 VirtPageDesc *vp;
1694
90f18422 1695 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1696 vp->phys_addr = pd;
1697 vp->prot = prot;
1698 vp->valid_tag = virt_valid_tag;
1699 prot &= ~PAGE_WRITE;
1700 }
1701 }
1702 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1703 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1704 if (map_addr == MAP_FAILED) {
 1705 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1706 paddr, vaddr);
9fa3e853 1707 }
9fa3e853
FB
1708 }
1709 }
1710 }
1711#endif
1712 return ret;
1713}
1714
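/* For a RAM page the TLB entry stores the guest virtual address plus an
   addend that converts it directly into a host pointer
   (addend = phys_ram_base + phys page offset - vaddr).  For I/O or
   not-dirty pages the low bits of addr_read/addr_write/addr_code carry
   the io_mem index or flags such as IO_MEM_NOTDIRTY, which forces the
   access through the slow path. */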
1715/* called from signal handler: invalidate the code and unprotect the
 1716 page. Return TRUE if the fault was successfully handled. */
53a5960a 1717int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1718{
1719#if !defined(CONFIG_SOFTMMU)
1720 VirtPageDesc *vp;
1721
1722#if defined(DEBUG_TLB)
1723 printf("page_unprotect: addr=0x%08x\n", addr);
1724#endif
1725 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1726
1727 /* if it is not mapped, no need to worry here */
1728 if (addr >= MMAP_AREA_END)
1729 return 0;
9fa3e853
FB
1730 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1731 if (!vp)
1732 return 0;
 1733 /* NOTE: in this case, valid_tag is _not_ tested as it
1734 validates only the code TLB */
1735 if (vp->valid_tag != virt_valid_tag)
1736 return 0;
1737 if (!(vp->prot & PAGE_WRITE))
1738 return 0;
1739#if defined(DEBUG_TLB)
1740 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1741 addr, vp->phys_addr, vp->prot);
1742#endif
59817ccb
FB
1743 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1744 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1745 (unsigned long)addr, vp->prot);
d720b93d 1746 /* set the dirty bit */
0a962c02 1747 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1748 /* flush the code inside */
1749 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1750 return 1;
1751#else
1752 return 0;
1753#endif
33417e70
FB
1754}
1755
0124311e
FB
1756#else
1757
ee8b7021 1758void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1759{
1760}
1761
2e12669a 1762void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1763{
1764}
1765
84b7b8e7
FB
1766int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1767 target_phys_addr_t paddr, int prot,
1768 int is_user, int is_softmmu)
9fa3e853
FB
1769{
1770 return 0;
1771}
0124311e 1772
9fa3e853
FB
1773/* dump memory mappings */
1774void page_dump(FILE *f)
33417e70 1775{
9fa3e853
FB
1776 unsigned long start, end;
1777 int i, j, prot, prot1;
1778 PageDesc *p;
33417e70 1779
9fa3e853
FB
1780 fprintf(f, "%-8s %-8s %-8s %s\n",
1781 "start", "end", "size", "prot");
1782 start = -1;
1783 end = -1;
1784 prot = 0;
1785 for(i = 0; i <= L1_SIZE; i++) {
1786 if (i < L1_SIZE)
1787 p = l1_map[i];
1788 else
1789 p = NULL;
1790 for(j = 0;j < L2_SIZE; j++) {
1791 if (!p)
1792 prot1 = 0;
1793 else
1794 prot1 = p[j].flags;
1795 if (prot1 != prot) {
1796 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1797 if (start != -1) {
1798 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1799 start, end, end - start,
1800 prot & PAGE_READ ? 'r' : '-',
1801 prot & PAGE_WRITE ? 'w' : '-',
1802 prot & PAGE_EXEC ? 'x' : '-');
1803 }
1804 if (prot1 != 0)
1805 start = end;
1806 else
1807 start = -1;
1808 prot = prot1;
1809 }
1810 if (!p)
1811 break;
1812 }
33417e70 1813 }
33417e70
FB
1814}
1815
53a5960a 1816int page_get_flags(target_ulong address)
33417e70 1817{
9fa3e853
FB
1818 PageDesc *p;
1819
1820 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1821 if (!p)
9fa3e853
FB
1822 return 0;
1823 return p->flags;
1824}
1825
1826/* modify the flags of a page and invalidate the code if
 1827    necessary. The flag PAGE_WRITE_ORG is set automatically
1828 depending on PAGE_WRITE */
53a5960a 1829void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1830{
1831 PageDesc *p;
53a5960a 1832 target_ulong addr;
9fa3e853
FB
1833
1834 start = start & TARGET_PAGE_MASK;
1835 end = TARGET_PAGE_ALIGN(end);
1836 if (flags & PAGE_WRITE)
1837 flags |= PAGE_WRITE_ORG;
1838 spin_lock(&tb_lock);
1839 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1840 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1841 /* if the write protection is set, then we invalidate the code
1842 inside */
1843 if (!(p->flags & PAGE_WRITE) &&
1844 (flags & PAGE_WRITE) &&
1845 p->first_tb) {
d720b93d 1846 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1847 }
1848 p->flags = flags;
1849 }
1850 spin_unlock(&tb_lock);
33417e70
FB
1851}
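/* Illustrative sketch (editor's addition, not part of this file): user-mode
   emulation is expected to record new guest mappings through page_set_flags();
   the helper name and the host-prot translation below are assumptions. */
static void record_guest_mapping(target_ulong start, target_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;    /* page_set_flags() adds PAGE_WRITE_ORG itself */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}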
1852
9fa3e853
FB
1853/* called from signal handler: invalidate the code and unprotect the
 1854    page. Return TRUE if the fault was successfully handled. */
53a5960a 1855int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1856{
1857 unsigned int page_index, prot, pindex;
1858 PageDesc *p, *p1;
53a5960a 1859 target_ulong host_start, host_end, addr;
9fa3e853 1860
83fb7adf 1861 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1862 page_index = host_start >> TARGET_PAGE_BITS;
1863 p1 = page_find(page_index);
1864 if (!p1)
1865 return 0;
83fb7adf 1866 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1867 p = p1;
1868 prot = 0;
1869 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1870 prot |= p->flags;
1871 p++;
1872 }
1873 /* if the page was really writable, then we change its
1874 protection back to writable */
1875 if (prot & PAGE_WRITE_ORG) {
1876 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1877 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1878 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1879 (prot & PAGE_BITS) | PAGE_WRITE);
1880 p1[pindex].flags |= PAGE_WRITE;
1881 /* and since the content will be modified, we must invalidate
1882 the corresponding translated code. */
d720b93d 1883 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1884#ifdef DEBUG_TB_CHECK
1885 tb_invalidate_check(address);
1886#endif
1887 return 1;
1888 }
1889 }
1890 return 0;
1891}
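/* Illustrative sketch (editor's addition, not part of this file): the
   user-mode SIGSEGV handler is expected to give page_unprotect() a chance to
   resolve a write fault before raising a guest exception; the wrapper name is
   an assumption and h2g() converts a host address back to a guest address. */
static int handle_write_fault(unsigned long host_addr, unsigned long pc, void *puc)
{
    if (page_unprotect(h2g(host_addr), pc, puc))
        return 1;   /* fault handled: the faulting write can be restarted */
    return 0;       /* genuine fault: deliver it to the guest */
}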
1892
1893/* call this function when system calls directly modify a memory area */
53a5960a
PB
1894/* ??? This should be redundant now we have lock_user. */
1895void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1896{
53a5960a 1897 target_ulong start, end, addr;
9fa3e853 1898
53a5960a 1899 start = data;
9fa3e853
FB
1900 end = start + data_size;
1901 start &= TARGET_PAGE_MASK;
1902 end = TARGET_PAGE_ALIGN(end);
1903 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1904 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1905 }
1906}
1907
6a00d601
FB
1908static inline void tlb_set_dirty(CPUState *env,
1909 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1910{
1911}
9fa3e853
FB
1912#endif /* defined(CONFIG_USER_ONLY) */
1913
db7b5426
BS
1914static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1915 int memory);
1916static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1917 int orig_memory);
1918#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1919 need_subpage) \
1920 do { \
1921 if (addr > start_addr) \
1922 start_addr2 = 0; \
1923 else { \
1924 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1925 if (start_addr2 > 0) \
1926 need_subpage = 1; \
1927 } \
1928 \
49e9fba2 1929 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1930 end_addr2 = TARGET_PAGE_SIZE - 1; \
1931 else { \
1932 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1933 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1934 need_subpage = 1; \
1935 } \
1936 } while (0)
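/* Worked example of CHECK_SUBPAGE (editor's addition; numbers assumed): with a
   4 KiB TARGET_PAGE_SIZE, registering start_addr = 0x10000100 with
   orig_size = 0x80 visits only the page addr = 0x10000000.  Because
   addr <= start_addr, start_addr2 = 0x100; because the region ends inside the
   page, end_addr2 = 0x17f.  Both branches set need_subpage, so only bytes
   0x100..0x17f of that page are redirected via subpage_register(). */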
1937
33417e70
FB
1938/* register physical memory. 'size' must be a multiple of the target
1939 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
 1940    I/O memory page */
2e12669a
FB
1941void cpu_register_physical_memory(target_phys_addr_t start_addr,
1942 unsigned long size,
1943 unsigned long phys_offset)
33417e70 1944{
108c49b8 1945 target_phys_addr_t addr, end_addr;
92e873b9 1946 PhysPageDesc *p;
9d42037b 1947 CPUState *env;
db7b5426
BS
1948 unsigned long orig_size = size;
1949 void *subpage;
33417e70 1950
5fd386f6 1951 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
1952 end_addr = start_addr + (target_phys_addr_t)size;
1953 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
1954 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1955 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
1956 unsigned long orig_memory = p->phys_offset;
1957 target_phys_addr_t start_addr2, end_addr2;
1958 int need_subpage = 0;
1959
1960 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
1961 need_subpage);
1962 if (need_subpage) {
1963 if (!(orig_memory & IO_MEM_SUBPAGE)) {
1964 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1965 &p->phys_offset, orig_memory);
1966 } else {
1967 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
1968 >> IO_MEM_SHIFT];
1969 }
1970 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
1971 } else {
1972 p->phys_offset = phys_offset;
1973 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1974 (phys_offset & IO_MEM_ROMD))
1975 phys_offset += TARGET_PAGE_SIZE;
1976 }
1977 } else {
1978 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1979 p->phys_offset = phys_offset;
1980 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1981 (phys_offset & IO_MEM_ROMD))
1982 phys_offset += TARGET_PAGE_SIZE;
1983 else {
1984 target_phys_addr_t start_addr2, end_addr2;
1985 int need_subpage = 0;
1986
1987 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
1988 end_addr2, need_subpage);
1989
1990 if (need_subpage) {
1991 subpage = subpage_init((addr & TARGET_PAGE_MASK),
1992 &p->phys_offset, IO_MEM_UNASSIGNED);
1993 subpage_register(subpage, start_addr2, end_addr2,
1994 phys_offset);
1995 }
1996 }
1997 }
33417e70 1998 }
9d42037b
FB
1999
2000 /* since each CPU stores ram addresses in its TLB cache, we must
2001 reset the modified entries */
2002 /* XXX: slow ! */
2003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2004 tlb_flush(env, 1);
2005 }
33417e70
FB
2006}
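/* Illustrative sketch (editor's addition, not part of this file): typical
   board setup maps RAM from offset 0 of phys_ram_base and then a page of
   device registers; the addresses, sizes and I/O index are assumptions. */
static void board_map_memory(ram_addr_t ram_size, int mmio_iomemtype)
{
    /* guest-physical [0, ram_size) backed by the start of phys_ram_base;
       IO_MEM_RAM in the low bits marks it as ordinary RAM */
    cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
    /* one page of MMIO: phys_offset carries the value returned by
       cpu_register_io_memory() */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, mmio_iomemtype);
}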
2007
ba863458
FB
2008/* XXX: temporary until new memory mapping API */
2009uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2010{
2011 PhysPageDesc *p;
2012
2013 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2014 if (!p)
2015 return IO_MEM_UNASSIGNED;
2016 return p->phys_offset;
2017}
2018
e9a1ab19
FB
2019/* XXX: better than nothing */
2020ram_addr_t qemu_ram_alloc(unsigned int size)
2021{
2022 ram_addr_t addr;
2023 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2024 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2025 size, phys_ram_size);
2026 abort();
2027 }
2028 addr = phys_ram_alloc_offset;
2029 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2030 return addr;
2031}
2032
2033void qemu_ram_free(ram_addr_t addr)
2034{
2035}
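/* Illustrative sketch (editor's addition, not part of this file): a ROM image
   is typically given a slice of the preallocated RAM pool, filled in, and then
   exposed read-only; the function and parameter names are assumptions. */
static void map_bios(target_phys_addr_t base, int bios_size, const uint8_t *image)
{
    ram_addr_t bios_offset = qemu_ram_alloc(bios_size);

    memcpy(phys_ram_base + bios_offset, image, bios_size);
    cpu_register_physical_memory(base, bios_size, bios_offset | IO_MEM_ROM);
}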
2036
a4193c8a 2037static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2038{
67d3b957 2039#ifdef DEBUG_UNASSIGNED
6c36d3fa 2040 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
b4f0a316
BS
2041#endif
2042#ifdef TARGET_SPARC
6c36d3fa 2043 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2044#endif
33417e70
FB
2045 return 0;
2046}
2047
a4193c8a 2048static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2049{
67d3b957 2050#ifdef DEBUG_UNASSIGNED
6c36d3fa 2051 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
67d3b957 2052#endif
b4f0a316 2053#ifdef TARGET_SPARC
6c36d3fa 2054 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2055#endif
33417e70
FB
2056}
2057
2058static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2059 unassigned_mem_readb,
2060 unassigned_mem_readb,
2061 unassigned_mem_readb,
2062};
2063
2064static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2065 unassigned_mem_writeb,
2066 unassigned_mem_writeb,
2067 unassigned_mem_writeb,
2068};
2069
3a7d929e 2070static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2071{
3a7d929e
FB
2072 unsigned long ram_addr;
2073 int dirty_flags;
2074 ram_addr = addr - (unsigned long)phys_ram_base;
2075 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2076 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2077#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2078 tb_invalidate_phys_page_fast(ram_addr, 1);
2079 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2080#endif
3a7d929e 2081 }
c27004ec 2082 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2083#ifdef USE_KQEMU
2084 if (cpu_single_env->kqemu_enabled &&
2085 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2086 kqemu_modify_page(cpu_single_env, ram_addr);
2087#endif
f23db169
FB
2088 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2089 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2090 /* we remove the notdirty callback only if the code has been
2091 flushed */
2092 if (dirty_flags == 0xff)
6a00d601 2093 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2094}
2095
3a7d929e 2096static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2097{
3a7d929e
FB
2098 unsigned long ram_addr;
2099 int dirty_flags;
2100 ram_addr = addr - (unsigned long)phys_ram_base;
2101 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2102 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2103#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2104 tb_invalidate_phys_page_fast(ram_addr, 2);
2105 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2106#endif
3a7d929e 2107 }
c27004ec 2108 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2109#ifdef USE_KQEMU
2110 if (cpu_single_env->kqemu_enabled &&
2111 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2112 kqemu_modify_page(cpu_single_env, ram_addr);
2113#endif
f23db169
FB
2114 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2115 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2116 /* we remove the notdirty callback only if the code has been
2117 flushed */
2118 if (dirty_flags == 0xff)
6a00d601 2119 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2120}
2121
3a7d929e 2122static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2123{
3a7d929e
FB
2124 unsigned long ram_addr;
2125 int dirty_flags;
2126 ram_addr = addr - (unsigned long)phys_ram_base;
2127 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2128 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2129#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2130 tb_invalidate_phys_page_fast(ram_addr, 4);
2131 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2132#endif
3a7d929e 2133 }
c27004ec 2134 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2135#ifdef USE_KQEMU
2136 if (cpu_single_env->kqemu_enabled &&
2137 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2138 kqemu_modify_page(cpu_single_env, ram_addr);
2139#endif
f23db169
FB
2140 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2141 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2142 /* we remove the notdirty callback only if the code has been
2143 flushed */
2144 if (dirty_flags == 0xff)
6a00d601 2145 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2146}
2147
3a7d929e 2148static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2149 NULL, /* never used */
2150 NULL, /* never used */
2151 NULL, /* never used */
2152};
2153
1ccde1cb
FB
2154static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2155 notdirty_mem_writeb,
2156 notdirty_mem_writew,
2157 notdirty_mem_writel,
2158};
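/* For reference (editor's addition; a sketch assuming the cpu-all.h helpers of
   this era): a RAM page counts as fully dirty when every flag bit is set,
   which is the state the writers above restore once no translated code for
   the page remains:
       cpu_physical_memory_is_dirty(addr)  ~  phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff
       code invalidation still needed      ~  !(dirty_flags & CODE_DIRTY_FLAG)    */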
2159
6658ffb8
PB
2160#if defined(CONFIG_SOFTMMU)
2161/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2162 so these check for a hit then pass through to the normal out-of-line
2163 phys routines. */
2164static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2165{
2166 return ldub_phys(addr);
2167}
2168
2169static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2170{
2171 return lduw_phys(addr);
2172}
2173
2174static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2175{
2176 return ldl_phys(addr);
2177}
2178
2179/* Generate a debug exception if a watchpoint has been hit.
2180 Returns the real physical address of the access. addr will be a host
2181 address in the is_ram case. */
2182static target_ulong check_watchpoint(target_phys_addr_t addr)
2183{
2184 CPUState *env = cpu_single_env;
2185 target_ulong watch;
2186 target_ulong retaddr;
2187 int i;
2188
2189 retaddr = addr;
2190 for (i = 0; i < env->nb_watchpoints; i++) {
2191 watch = env->watchpoint[i].vaddr;
2192 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2193 if (env->watchpoint[i].is_ram)
2194 retaddr = addr - (unsigned long)phys_ram_base;
2195 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2196 cpu_single_env->watchpoint_hit = i + 1;
2197 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2198 break;
2199 }
2200 }
2201 }
2202 return retaddr;
2203}
2204
2205static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2206 uint32_t val)
2207{
2208 addr = check_watchpoint(addr);
2209 stb_phys(addr, val);
2210}
2211
2212static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2213 uint32_t val)
2214{
2215 addr = check_watchpoint(addr);
2216 stw_phys(addr, val);
2217}
2218
2219static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2220 uint32_t val)
2221{
2222 addr = check_watchpoint(addr);
2223 stl_phys(addr, val);
2224}
2225
2226static CPUReadMemoryFunc *watch_mem_read[3] = {
2227 watch_mem_readb,
2228 watch_mem_readw,
2229 watch_mem_readl,
2230};
2231
2232static CPUWriteMemoryFunc *watch_mem_write[3] = {
2233 watch_mem_writeb,
2234 watch_mem_writew,
2235 watch_mem_writel,
2236};
2237#endif
2238
db7b5426
BS
2239static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2240 unsigned int len)
2241{
2242 CPUReadMemoryFunc **mem_read;
2243 uint32_t ret;
2244 unsigned int idx;
2245
2246 idx = SUBPAGE_IDX(addr - mmio->base);
2247#if defined(DEBUG_SUBPAGE)
2248 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2249 mmio, len, addr, idx);
2250#endif
2251 mem_read = mmio->mem_read[idx];
2252 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2253
2254 return ret;
2255}
2256
2257static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2258 uint32_t value, unsigned int len)
2259{
2260 CPUWriteMemoryFunc **mem_write;
2261 unsigned int idx;
2262
2263 idx = SUBPAGE_IDX(addr - mmio->base);
2264#if defined(DEBUG_SUBPAGE)
2265 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2266 mmio, len, addr, idx, value);
2267#endif
2268 mem_write = mmio->mem_write[idx];
2269 (*mem_write[len])(mmio->opaque[idx], addr, value);
2270}
2271
2272static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2273{
2274#if defined(DEBUG_SUBPAGE)
2275 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2276#endif
2277
2278 return subpage_readlen(opaque, addr, 0);
2279}
2280
2281static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2282 uint32_t value)
2283{
2284#if defined(DEBUG_SUBPAGE)
2285 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2286#endif
2287 subpage_writelen(opaque, addr, value, 0);
2288}
2289
2290static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2291{
2292#if defined(DEBUG_SUBPAGE)
2293 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2294#endif
2295
2296 return subpage_readlen(opaque, addr, 1);
2297}
2298
2299static void subpage_writew (void *opaque, target_phys_addr_t addr,
2300 uint32_t value)
2301{
2302#if defined(DEBUG_SUBPAGE)
2303 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2304#endif
2305 subpage_writelen(opaque, addr, value, 1);
2306}
2307
2308static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2309{
2310#if defined(DEBUG_SUBPAGE)
2311 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2312#endif
2313
2314 return subpage_readlen(opaque, addr, 2);
2315}
2316
2317static void subpage_writel (void *opaque,
2318 target_phys_addr_t addr, uint32_t value)
2319{
2320#if defined(DEBUG_SUBPAGE)
2321 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2322#endif
2323 subpage_writelen(opaque, addr, value, 2);
2324}
2325
2326static CPUReadMemoryFunc *subpage_read[] = {
2327 &subpage_readb,
2328 &subpage_readw,
2329 &subpage_readl,
2330};
2331
2332static CPUWriteMemoryFunc *subpage_write[] = {
2333 &subpage_writeb,
2334 &subpage_writew,
2335 &subpage_writel,
2336};
2337
2338static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2339 int memory)
2340{
2341 int idx, eidx;
2342
2343 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2344 return -1;
2345 idx = SUBPAGE_IDX(start);
2346 eidx = SUBPAGE_IDX(end);
2347#if defined(DEBUG_SUBPAGE)
2348 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2349 mmio, start, end, idx, eidx, memory);
2350#endif
2351 memory >>= IO_MEM_SHIFT;
2352 for (; idx <= eidx; idx++) {
2353 mmio->mem_read[idx] = io_mem_read[memory];
2354 mmio->mem_write[idx] = io_mem_write[memory];
2355 mmio->opaque[idx] = io_mem_opaque[memory];
2356 }
2357
2358 return 0;
2359}
2360
2361static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2362 int orig_memory)
2363{
2364 subpage_t *mmio;
2365 int subpage_memory;
2366
2367 mmio = qemu_mallocz(sizeof(subpage_t));
2368 if (mmio != NULL) {
2369 mmio->base = base;
2370 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2371#if defined(DEBUG_SUBPAGE)
2372 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2373 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2374#endif
2375 *phys = subpage_memory | IO_MEM_SUBPAGE;
2376 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2377 }
2378
2379 return mmio;
2380}
2381
33417e70
FB
2382static void io_mem_init(void)
2383{
3a7d929e 2384 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2385 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2386 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2387 io_mem_nb = 5;
2388
6658ffb8
PB
2389#if defined(CONFIG_SOFTMMU)
2390 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2391 watch_mem_write, NULL);
2392#endif
1ccde1cb 2393 /* alloc dirty bits array */
0a962c02 2394 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2395 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2396}
2397
2398/* mem_read and mem_write are arrays of functions containing the
2399 function to access byte (index 0), word (index 1) and dword (index
 2400   2). All functions must be supplied. If io_index is non-zero, the
 2401   corresponding I/O zone is modified. If it is zero, a new I/O zone is
 2402   allocated. The return value can be used with
 2403   cpu_register_physical_memory(). (-1) is returned on error. */
2404int cpu_register_io_memory(int io_index,
2405 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2406 CPUWriteMemoryFunc **mem_write,
2407 void *opaque)
33417e70
FB
2408{
2409 int i;
2410
2411 if (io_index <= 0) {
b5ff1b31 2412 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2413 return -1;
2414 io_index = io_mem_nb++;
2415 } else {
2416 if (io_index >= IO_MEM_NB_ENTRIES)
2417 return -1;
2418 }
b5ff1b31 2419
33417e70
FB
2420 for(i = 0;i < 3; i++) {
2421 io_mem_read[io_index][i] = mem_read[i];
2422 io_mem_write[io_index][i] = mem_write[i];
2423 }
a4193c8a 2424 io_mem_opaque[io_index] = opaque;
33417e70
FB
2425 return io_index << IO_MEM_SHIFT;
2426}
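/* Illustrative sketch (editor's addition, not part of this file): a device
   supplies byte/word/dword handlers and maps them behind a guest-physical
   address.  MyDevState, the mydev_* handlers, the base address and the
   one-page size are assumptions. */
typedef struct MyDevState MyDevState;

static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr);
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr);
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr);
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val);
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val);
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val);

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    /* io_index 0 requests a fresh slot; the return value already carries the
       index shifted by IO_MEM_SHIFT, ready for cpu_register_physical_memory() */
    int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    if (iomemtype < 0)
        return;     /* table full */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}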
61382a50 2427
8926b517
FB
2428CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2429{
2430 return io_mem_write[io_index >> IO_MEM_SHIFT];
2431}
2432
2433CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2434{
2435 return io_mem_read[io_index >> IO_MEM_SHIFT];
2436}
2437
13eb76e0
FB
2438/* physical memory access (slow version, mainly for debug) */
2439#if defined(CONFIG_USER_ONLY)
2e12669a 2440void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2441 int len, int is_write)
2442{
2443 int l, flags;
2444 target_ulong page;
53a5960a 2445 void * p;
13eb76e0
FB
2446
2447 while (len > 0) {
2448 page = addr & TARGET_PAGE_MASK;
2449 l = (page + TARGET_PAGE_SIZE) - addr;
2450 if (l > len)
2451 l = len;
2452 flags = page_get_flags(page);
2453 if (!(flags & PAGE_VALID))
2454 return;
2455 if (is_write) {
2456 if (!(flags & PAGE_WRITE))
2457 return;
53a5960a
PB
2458 p = lock_user(addr, len, 0);
2459 memcpy(p, buf, len);
2460 unlock_user(p, addr, len);
13eb76e0
FB
2461 } else {
2462 if (!(flags & PAGE_READ))
2463 return;
53a5960a
PB
2464 p = lock_user(addr, len, 1);
2465 memcpy(buf, p, len);
2466 unlock_user(p, addr, 0);
13eb76e0
FB
2467 }
2468 len -= l;
2469 buf += l;
2470 addr += l;
2471 }
2472}
8df1cd07 2473
13eb76e0 2474#else
2e12669a 2475void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2476 int len, int is_write)
2477{
2478 int l, io_index;
2479 uint8_t *ptr;
2480 uint32_t val;
2e12669a
FB
2481 target_phys_addr_t page;
2482 unsigned long pd;
92e873b9 2483 PhysPageDesc *p;
13eb76e0
FB
2484
2485 while (len > 0) {
2486 page = addr & TARGET_PAGE_MASK;
2487 l = (page + TARGET_PAGE_SIZE) - addr;
2488 if (l > len)
2489 l = len;
92e873b9 2490 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2491 if (!p) {
2492 pd = IO_MEM_UNASSIGNED;
2493 } else {
2494 pd = p->phys_offset;
2495 }
2496
2497 if (is_write) {
3a7d929e 2498 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2499 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2500 /* XXX: could force cpu_single_env to NULL to avoid
2501 potential bugs */
13eb76e0 2502 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2503 /* 32 bit write access */
c27004ec 2504 val = ldl_p(buf);
a4193c8a 2505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2506 l = 4;
2507 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2508 /* 16 bit write access */
c27004ec 2509 val = lduw_p(buf);
a4193c8a 2510 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2511 l = 2;
2512 } else {
1c213d19 2513 /* 8 bit write access */
c27004ec 2514 val = ldub_p(buf);
a4193c8a 2515 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2516 l = 1;
2517 }
2518 } else {
b448f2f3
FB
2519 unsigned long addr1;
2520 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2521 /* RAM case */
b448f2f3 2522 ptr = phys_ram_base + addr1;
13eb76e0 2523 memcpy(ptr, buf, l);
3a7d929e
FB
2524 if (!cpu_physical_memory_is_dirty(addr1)) {
2525 /* invalidate code */
2526 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2527 /* set dirty bit */
f23db169
FB
2528 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2529 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2530 }
13eb76e0
FB
2531 }
2532 } else {
2a4188a3
FB
2533 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2534 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2535 /* I/O case */
2536 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2537 if (l >= 4 && ((addr & 3) == 0)) {
2538 /* 32 bit read access */
a4193c8a 2539 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2540 stl_p(buf, val);
13eb76e0
FB
2541 l = 4;
2542 } else if (l >= 2 && ((addr & 1) == 0)) {
2543 /* 16 bit read access */
a4193c8a 2544 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2545 stw_p(buf, val);
13eb76e0
FB
2546 l = 2;
2547 } else {
1c213d19 2548 /* 8 bit read access */
a4193c8a 2549 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2550 stb_p(buf, val);
13eb76e0
FB
2551 l = 1;
2552 }
2553 } else {
2554 /* RAM case */
2555 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2556 (addr & ~TARGET_PAGE_MASK);
2557 memcpy(buf, ptr, l);
2558 }
2559 }
2560 len -= l;
2561 buf += l;
2562 addr += l;
2563 }
2564}
8df1cd07 2565
d0ecd2aa
FB
2566/* used for ROM loading : can write in RAM and ROM */
2567void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2568 const uint8_t *buf, int len)
2569{
2570 int l;
2571 uint8_t *ptr;
2572 target_phys_addr_t page;
2573 unsigned long pd;
2574 PhysPageDesc *p;
2575
2576 while (len > 0) {
2577 page = addr & TARGET_PAGE_MASK;
2578 l = (page + TARGET_PAGE_SIZE) - addr;
2579 if (l > len)
2580 l = len;
2581 p = phys_page_find(page >> TARGET_PAGE_BITS);
2582 if (!p) {
2583 pd = IO_MEM_UNASSIGNED;
2584 } else {
2585 pd = p->phys_offset;
2586 }
2587
2588 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2589 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2590 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2591 /* do nothing */
2592 } else {
2593 unsigned long addr1;
2594 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2595 /* ROM/RAM case */
2596 ptr = phys_ram_base + addr1;
2597 memcpy(ptr, buf, l);
2598 }
2599 len -= l;
2600 buf += l;
2601 addr += l;
2602 }
2603}
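/* Illustrative sketch (editor's addition, not part of this file): firmware
   loaders use cpu_physical_memory_write_rom() because a plain write would be
   refused once the region is mapped as ROM; the wrapper name is an assumption. */
static void load_firmware_image(target_phys_addr_t base, const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(base, image, size);
}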
2604
2605
8df1cd07
FB
2606/* warning: addr must be aligned */
2607uint32_t ldl_phys(target_phys_addr_t addr)
2608{
2609 int io_index;
2610 uint8_t *ptr;
2611 uint32_t val;
2612 unsigned long pd;
2613 PhysPageDesc *p;
2614
2615 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2616 if (!p) {
2617 pd = IO_MEM_UNASSIGNED;
2618 } else {
2619 pd = p->phys_offset;
2620 }
2621
2a4188a3
FB
2622 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2623 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2624 /* I/O case */
2625 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2626 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2627 } else {
2628 /* RAM case */
2629 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2630 (addr & ~TARGET_PAGE_MASK);
2631 val = ldl_p(ptr);
2632 }
2633 return val;
2634}
2635
84b7b8e7
FB
2636/* warning: addr must be aligned */
2637uint64_t ldq_phys(target_phys_addr_t addr)
2638{
2639 int io_index;
2640 uint8_t *ptr;
2641 uint64_t val;
2642 unsigned long pd;
2643 PhysPageDesc *p;
2644
2645 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2646 if (!p) {
2647 pd = IO_MEM_UNASSIGNED;
2648 } else {
2649 pd = p->phys_offset;
2650 }
2651
2a4188a3
FB
2652 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2653 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2654 /* I/O case */
2655 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2656#ifdef TARGET_WORDS_BIGENDIAN
2657 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2658 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2659#else
2660 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2661 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2662#endif
2663 } else {
2664 /* RAM case */
2665 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2666 (addr & ~TARGET_PAGE_MASK);
2667 val = ldq_p(ptr);
2668 }
2669 return val;
2670}
2671
aab33094
FB
2672/* XXX: optimize */
2673uint32_t ldub_phys(target_phys_addr_t addr)
2674{
2675 uint8_t val;
2676 cpu_physical_memory_read(addr, &val, 1);
2677 return val;
2678}
2679
2680/* XXX: optimize */
2681uint32_t lduw_phys(target_phys_addr_t addr)
2682{
2683 uint16_t val;
2684 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2685 return tswap16(val);
2686}
2687
8df1cd07
FB
 2688/* warning: addr must be aligned. The ram page is not marked as dirty
2689 and the code inside is not invalidated. It is useful if the dirty
2690 bits are used to track modified PTEs */
2691void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2692{
2693 int io_index;
2694 uint8_t *ptr;
2695 unsigned long pd;
2696 PhysPageDesc *p;
2697
2698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2699 if (!p) {
2700 pd = IO_MEM_UNASSIGNED;
2701 } else {
2702 pd = p->phys_offset;
2703 }
2704
3a7d929e 2705 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2706 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2707 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2708 } else {
2709 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2710 (addr & ~TARGET_PAGE_MASK);
2711 stl_p(ptr, val);
2712 }
2713}
2714
bc98a7ef
JM
2715void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2716{
2717 int io_index;
2718 uint8_t *ptr;
2719 unsigned long pd;
2720 PhysPageDesc *p;
2721
2722 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2723 if (!p) {
2724 pd = IO_MEM_UNASSIGNED;
2725 } else {
2726 pd = p->phys_offset;
2727 }
2728
2729 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2730 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2731#ifdef TARGET_WORDS_BIGENDIAN
2732 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2733 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2734#else
2735 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2736 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2737#endif
2738 } else {
2739 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2740 (addr & ~TARGET_PAGE_MASK);
2741 stq_p(ptr, val);
2742 }
2743}
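/* Illustrative sketch (editor's addition, not part of this file): a target MMU
   walk updates guest page-table entries with the _notdirty variants so that a
   guest using the dirty bitmap to track modified PTEs is not confused;
   PG_ACCESSED_MASK and the helper name are assumptions borrowed from the x86
   target. */
static void set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PG_ACCESSED_MASK))
        stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
}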
2744
8df1cd07 2745/* warning: addr must be aligned */
8df1cd07
FB
2746void stl_phys(target_phys_addr_t addr, uint32_t val)
2747{
2748 int io_index;
2749 uint8_t *ptr;
2750 unsigned long pd;
2751 PhysPageDesc *p;
2752
2753 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754 if (!p) {
2755 pd = IO_MEM_UNASSIGNED;
2756 } else {
2757 pd = p->phys_offset;
2758 }
2759
3a7d929e 2760 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2761 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2762 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2763 } else {
2764 unsigned long addr1;
2765 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2766 /* RAM case */
2767 ptr = phys_ram_base + addr1;
2768 stl_p(ptr, val);
3a7d929e
FB
2769 if (!cpu_physical_memory_is_dirty(addr1)) {
2770 /* invalidate code */
2771 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2772 /* set dirty bit */
f23db169
FB
2773 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2774 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2775 }
8df1cd07
FB
2776 }
2777}
2778
aab33094
FB
2779/* XXX: optimize */
2780void stb_phys(target_phys_addr_t addr, uint32_t val)
2781{
2782 uint8_t v = val;
2783 cpu_physical_memory_write(addr, &v, 1);
2784}
2785
2786/* XXX: optimize */
2787void stw_phys(target_phys_addr_t addr, uint32_t val)
2788{
2789 uint16_t v = tswap16(val);
2790 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2791}
2792
2793/* XXX: optimize */
2794void stq_phys(target_phys_addr_t addr, uint64_t val)
2795{
2796 val = tswap64(val);
2797 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2798}
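/* Illustrative sketch (editor's addition, not part of this file): device DMA
   commonly mixes the ld*_phys/st*_phys accessors for descriptor words with
   cpu_physical_memory_read() for bulk data; the descriptor layout (address,
   length, status) is an assumption. */
static void dma_copy_from_guest(target_phys_addr_t desc_addr)
{
    uint8_t buf[64];
    target_phys_addr_t src = ldl_phys(desc_addr);        /* buffer address */
    uint32_t len = ldl_phys(desc_addr + 4);              /* buffer length  */

    if (len > sizeof(buf))
        len = sizeof(buf);
    cpu_physical_memory_read(src, buf, len);
    /* ... hand buf to the host side of the device ... */
    stl_phys(desc_addr + 8, 1);                          /* mark descriptor done */
}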
2799
13eb76e0
FB
2800#endif
2801
2802/* virtual memory access for debug */
b448f2f3
FB
2803int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2804 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2805{
2806 int l;
9b3c35e0
JM
2807 target_phys_addr_t phys_addr;
2808 target_ulong page;
13eb76e0
FB
2809
2810 while (len > 0) {
2811 page = addr & TARGET_PAGE_MASK;
2812 phys_addr = cpu_get_phys_page_debug(env, page);
2813 /* if no physical page mapped, return an error */
2814 if (phys_addr == -1)
2815 return -1;
2816 l = (page + TARGET_PAGE_SIZE) - addr;
2817 if (l > len)
2818 l = len;
b448f2f3
FB
2819 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2820 buf, l, is_write);
13eb76e0
FB
2821 len -= l;
2822 buf += l;
2823 addr += l;
2824 }
2825 return 0;
2826}
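/* Illustrative sketch (editor's addition, not part of this file): debugger
   front ends such as the gdb stub read guest memory through this routine; the
   wrapper name and the length clamp are assumptions. */
static int debugger_read_guest(CPUState *env, target_ulong addr, uint8_t *out, int len)
{
    if (len > TARGET_PAGE_SIZE)
        len = TARGET_PAGE_SIZE;
    return cpu_memory_rw_debug(env, addr, out, len, 0);  /* is_write = 0 */
}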
2827
e3db7226
FB
2828void dump_exec_info(FILE *f,
2829 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2830{
2831 int i, target_code_size, max_target_code_size;
2832 int direct_jmp_count, direct_jmp2_count, cross_page;
2833 TranslationBlock *tb;
2834
2835 target_code_size = 0;
2836 max_target_code_size = 0;
2837 cross_page = 0;
2838 direct_jmp_count = 0;
2839 direct_jmp2_count = 0;
2840 for(i = 0; i < nb_tbs; i++) {
2841 tb = &tbs[i];
2842 target_code_size += tb->size;
2843 if (tb->size > max_target_code_size)
2844 max_target_code_size = tb->size;
2845 if (tb->page_addr[1] != -1)
2846 cross_page++;
2847 if (tb->tb_next_offset[0] != 0xffff) {
2848 direct_jmp_count++;
2849 if (tb->tb_next_offset[1] != 0xffff) {
2850 direct_jmp2_count++;
2851 }
2852 }
2853 }
2854 /* XXX: avoid using doubles ? */
2855 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2856 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2857 nb_tbs ? target_code_size / nb_tbs : 0,
2858 max_target_code_size);
2859 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2860 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2861 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2862 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2863 cross_page,
2864 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2865 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2866 direct_jmp_count,
2867 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2868 direct_jmp2_count,
2869 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2870 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2871 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2872 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2873}
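/* Illustrative sketch (editor's addition, not part of this file): any
   printf-like callback taking a FILE * first works here, so the statistics can
   be dumped to stderr directly; the wrapper name is an assumption. */
static void dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}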
2874
61382a50
FB
2875#if !defined(CONFIG_USER_ONLY)
2876
2877#define MMUSUFFIX _cmmu
2878#define GETPC() NULL
2879#define env cpu_single_env
b769d8fe 2880#define SOFTMMU_CODE_ACCESS
61382a50
FB
2881
2882#define SHIFT 0
2883#include "softmmu_template.h"
2884
2885#define SHIFT 1
2886#include "softmmu_template.h"
2887
2888#define SHIFT 2
2889#include "softmmu_template.h"
2890
2891#define SHIFT 3
2892#include "softmmu_template.h"
2893
2894#undef env
2895
2896#endif