/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

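/* find the PageDesc for a given target page index, allocating the
   level 2 table on demand (the level 1 table 'l1_map' is static) */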
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

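/* find the PhysPageDesc for a given physical page index. Depending on
   TARGET_PHYS_ADDR_SPACE_BITS, the lookup below 'l1_phys_map' uses one
   or two table levels. */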
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

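/* unlink a TB from the list of TBs in a page. The low 2 bits of each
   list pointer encode which of the TB's two page slots ('page_next[0]'
   or 'page_next[1]') contains the next link. */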
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

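/* remove the jump 'n' of 'tb' from the circular list of TBs jumping to
   the same target. The list pointers are tagged in their low 2 bits
   with the jump slot index; the tag value 2 marks the list head. */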
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

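/* set bits [start, start + len) in the bitmap 'tab' */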
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

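/* build a bitmap of the bytes of a page covered by translated code, so
   that writes which touch no translated code can be accepted without
   invalidating anything (see tb_invalidate_phys_page_fast) */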
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

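/* generate a new TB for the code at 'pc' and link it into the physical
   page tables (only used for precise self modifying code handling) */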
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

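/* unchain the direct jump 'n' of 'tb', then recursively unchain all
   the jumps of the TB it pointed to, so that no chained TB can be
   entered without going through the exec loop again */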
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if (env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
#if 0
    /* XXX: broken, must be handled by each CPU */
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
#else
    return NULL;
#endif
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

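/* invalidate the TLB entry if it matches 'addr' for any access type
   (read, write or code fetch) */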
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

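/* if the TLB entry maps RAM inside [start, start + length), redirect
   writes through the IO_MEM_NOTDIRTY handler so that the dirty bits
   are updated again on the next store */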
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

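/* make writes through the TLB entry trap again if the dirty flag of
   its RAM page has been cleared */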
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

1854/* modify the flags of a page and invalidate the code if
 1855 necessary. The flag PAGE_WRITE_ORG is set automatically
1856 depending on PAGE_WRITE */
53a5960a 1857void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1858{
1859 PageDesc *p;
53a5960a 1860 target_ulong addr;
9fa3e853
FB
1861
1862 start = start & TARGET_PAGE_MASK;
1863 end = TARGET_PAGE_ALIGN(end);
1864 if (flags & PAGE_WRITE)
1865 flags |= PAGE_WRITE_ORG;
1866 spin_lock(&tb_lock);
1867 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1868 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1869 /* if the write protection is set, then we invalidate the code
1870 inside */
5fafdf24 1871 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1872 (flags & PAGE_WRITE) &&
1873 p->first_tb) {
d720b93d 1874 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1875 }
1876 p->flags = flags;
1877 }
1878 spin_unlock(&tb_lock);
33417e70
FB
1879}
1880
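/* Editor's sketch (not part of the original source): a hypothetical
   target-side mprotect emulation updating the guest page flags once the
   host protection change has succeeded.  PAGE_VALID, PAGE_READ and
   PAGE_WRITE are the flags tested elsewhere in this file. */
static void example_mprotect_rw(target_ulong start, target_ulong len)
{
    /* page_set_flags() adds PAGE_WRITE_ORG itself and invalidates any
       translated code inside the now-writable range */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}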
3d97b40b
TS
1881int page_check_range(target_ulong start, target_ulong len, int flags)
1882{
1883 PageDesc *p;
1884 target_ulong end;
1885 target_ulong addr;
1886
 1887 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1888 start = start & TARGET_PAGE_MASK;
1889
1890 if( end < start )
1891 /* we've wrapped around */
1892 return -1;
1893 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1894 p = page_find(addr >> TARGET_PAGE_BITS);
1895 if( !p )
1896 return -1;
1897 if( !(p->flags & PAGE_VALID) )
1898 return -1;
1899
dae3270c 1900 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1901 return -1;
dae3270c
FB
1902 if (flags & PAGE_WRITE) {
1903 if (!(p->flags & PAGE_WRITE_ORG))
1904 return -1;
1905 /* unprotect the page if it was put read-only because it
1906 contains translated code */
1907 if (!(p->flags & PAGE_WRITE)) {
1908 if (!page_unprotect(addr, 0, NULL))
1909 return -1;
1910 }
1911 return 0;
1912 }
3d97b40b
TS
1913 }
1914 return 0;
1915}
1916
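/* Editor's sketch (not part of the original source): validating a guest
   buffer before writing into it, as a syscall emulation layer might do
   with page_check_range(). */
static int example_buffer_writable(target_ulong guest_addr, target_ulong len)
{
    /* 0 if every page is valid and writable (read-only pages that only
       protect translated code are unprotected on the way), -1 otherwise */
    return page_check_range(guest_addr, len, PAGE_WRITE);
}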
9fa3e853
FB
1917/* called from signal handler: invalidate the code and unprotect the
 1918 page. Return TRUE if the fault was successfully handled. */
53a5960a 1919int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1920{
1921 unsigned int page_index, prot, pindex;
1922 PageDesc *p, *p1;
53a5960a 1923 target_ulong host_start, host_end, addr;
9fa3e853 1924
83fb7adf 1925 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1926 page_index = host_start >> TARGET_PAGE_BITS;
1927 p1 = page_find(page_index);
1928 if (!p1)
1929 return 0;
83fb7adf 1930 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1931 p = p1;
1932 prot = 0;
1933 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1934 prot |= p->flags;
1935 p++;
1936 }
1937 /* if the page was really writable, then we change its
1938 protection back to writable */
1939 if (prot & PAGE_WRITE_ORG) {
1940 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1941 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1942 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1943 (prot & PAGE_BITS) | PAGE_WRITE);
1944 p1[pindex].flags |= PAGE_WRITE;
1945 /* and since the content will be modified, we must invalidate
1946 the corresponding translated code. */
d720b93d 1947 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1948#ifdef DEBUG_TB_CHECK
1949 tb_invalidate_check(address);
1950#endif
1951 return 1;
1952 }
1953 }
1954 return 0;
1955}
1956
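/* Editor's sketch (not part of the original source): how a host SIGSEGV
   handler can forward a write fault to page_unprotect().  Recovering the
   faulting pc is host-specific, so 0 is passed here; the handler name is
   hypothetical. */
#include <signal.h>

static void example_segv_handler(int host_signum, siginfo_t *info, void *puc)
{
    unsigned long addr = (unsigned long)info->si_addr;

    /* a write to a page made read-only because it contains translated
       code: invalidate the code and restore PAGE_WRITE */
    if (page_unprotect((target_ulong)addr, 0, puc))
        return; /* handled: the faulting instruction is restarted */

    abort(); /* a genuine fault */
}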
6a00d601
FB
1957static inline void tlb_set_dirty(CPUState *env,
1958 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1959{
1960}
9fa3e853
FB
1961#endif /* defined(CONFIG_USER_ONLY) */
1962
db7b5426
BS
1963static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1964 int memory);
1965static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1966 int orig_memory);
1967#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1968 need_subpage) \
1969 do { \
1970 if (addr > start_addr) \
1971 start_addr2 = 0; \
1972 else { \
1973 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1974 if (start_addr2 > 0) \
1975 need_subpage = 1; \
1976 } \
1977 \
49e9fba2 1978 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1979 end_addr2 = TARGET_PAGE_SIZE - 1; \
1980 else { \
1981 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1982 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1983 need_subpage = 1; \
1984 } \
1985 } while (0)
1986
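/* Editor's worked example (assuming 4 KiB target pages): registering a
   region with start_addr = 0x1800 and orig_size = 0x400, the loop page
   addr = 0x1000 gives start_addr2 = 0x800, end_addr2 = 0xbff and
   need_subpage = 1, i.e. only bytes 0x800..0xbff of that page belong to
   the region, so a subpage must mediate accesses to the rest of it. */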
33417e70
FB
1987/* register physical memory. 'size' must be a multiple of the target
1988 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
 1989 I/O memory page */
5fafdf24 1990void cpu_register_physical_memory(target_phys_addr_t start_addr,
2e12669a
FB
1991 unsigned long size,
1992 unsigned long phys_offset)
33417e70 1993{
108c49b8 1994 target_phys_addr_t addr, end_addr;
92e873b9 1995 PhysPageDesc *p;
9d42037b 1996 CPUState *env;
db7b5426
BS
1997 unsigned long orig_size = size;
1998 void *subpage;
33417e70 1999
5fd386f6 2000 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2001 end_addr = start_addr + (target_phys_addr_t)size;
2002 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2003 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2004 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2005 unsigned long orig_memory = p->phys_offset;
2006 target_phys_addr_t start_addr2, end_addr2;
2007 int need_subpage = 0;
2008
2009 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2010 need_subpage);
2011 if (need_subpage) {
2012 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2013 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2014 &p->phys_offset, orig_memory);
2015 } else {
2016 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2017 >> IO_MEM_SHIFT];
2018 }
2019 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2020 } else {
2021 p->phys_offset = phys_offset;
2022 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2023 (phys_offset & IO_MEM_ROMD))
2024 phys_offset += TARGET_PAGE_SIZE;
2025 }
2026 } else {
2027 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2028 p->phys_offset = phys_offset;
2029 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2030 (phys_offset & IO_MEM_ROMD))
2031 phys_offset += TARGET_PAGE_SIZE;
2032 else {
2033 target_phys_addr_t start_addr2, end_addr2;
2034 int need_subpage = 0;
2035
2036 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2037 end_addr2, need_subpage);
2038
2039 if (need_subpage) {
2040 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2041 &p->phys_offset, IO_MEM_UNASSIGNED);
2042 subpage_register(subpage, start_addr2, end_addr2,
2043 phys_offset);
2044 }
2045 }
2046 }
33417e70 2047 }
3b46e624 2048
9d42037b
FB
2049 /* since each CPU stores ram addresses in its TLB cache, we must
2050 reset the modified entries */
 2051 /* XXX: slow! */
2052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2053 tlb_flush(env, 1);
2054 }
33417e70
FB
2055}
2056
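/* Editor's sketch (not part of the original source): typical board-level
   use, mapping RAM at guest physical 0 and a small ROM higher up.  The
   sizes and the ROM base address are hypothetical; qemu_ram_alloc()
   returns an offset into phys_ram_base that is or'ed with the IO_MEM_*
   type bits. */
static void example_map_memory(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    ram_addr_t rom_offset = qemu_ram_alloc(64 * 1024);

    /* 8 MB of RAM at physical address 0 */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
    /* 64 KB of ROM: reads hit RAM directly, writes go to the I/O hooks */
    cpu_register_physical_memory(0x1fc00000, 64 * 1024,
                                 rom_offset | IO_MEM_ROM);
}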
ba863458
FB
2057/* XXX: temporary until new memory mapping API */
2058uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2059{
2060 PhysPageDesc *p;
2061
2062 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2063 if (!p)
2064 return IO_MEM_UNASSIGNED;
2065 return p->phys_offset;
2066}
2067
e9a1ab19
FB
2068/* XXX: better than nothing */
2069ram_addr_t qemu_ram_alloc(unsigned int size)
2070{
2071 ram_addr_t addr;
2072 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
5fafdf24 2073 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
e9a1ab19
FB
2074 size, phys_ram_size);
2075 abort();
2076 }
2077 addr = phys_ram_alloc_offset;
2078 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2079 return addr;
2080}
2081
2082void qemu_ram_free(ram_addr_t addr)
2083{
2084}
2085
a4193c8a 2086static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2087{
67d3b957 2088#ifdef DEBUG_UNASSIGNED
ab3d1727 2089 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2090#endif
2091#ifdef TARGET_SPARC
6c36d3fa 2092 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2093#elif TARGET_CRIS
2094 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2095#endif
33417e70
FB
2096 return 0;
2097}
2098
a4193c8a 2099static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2100{
67d3b957 2101#ifdef DEBUG_UNASSIGNED
ab3d1727 2102 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2103#endif
b4f0a316 2104#ifdef TARGET_SPARC
6c36d3fa 2105 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2106#elif TARGET_CRIS
2107 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2108#endif
33417e70
FB
2109}
2110
2111static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2112 unassigned_mem_readb,
2113 unassigned_mem_readb,
2114 unassigned_mem_readb,
2115};
2116
2117static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2118 unassigned_mem_writeb,
2119 unassigned_mem_writeb,
2120 unassigned_mem_writeb,
2121};
2122
3a7d929e 2123static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2124{
3a7d929e
FB
2125 unsigned long ram_addr;
2126 int dirty_flags;
2127 ram_addr = addr - (unsigned long)phys_ram_base;
2128 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2129 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2130#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2131 tb_invalidate_phys_page_fast(ram_addr, 1);
2132 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2133#endif
3a7d929e 2134 }
c27004ec 2135 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2136#ifdef USE_KQEMU
2137 if (cpu_single_env->kqemu_enabled &&
2138 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2139 kqemu_modify_page(cpu_single_env, ram_addr);
2140#endif
f23db169
FB
2141 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2142 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2143 /* we remove the notdirty callback only if the code has been
2144 flushed */
2145 if (dirty_flags == 0xff)
6a00d601 2146 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2147}
2148
3a7d929e 2149static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2150{
3a7d929e
FB
2151 unsigned long ram_addr;
2152 int dirty_flags;
2153 ram_addr = addr - (unsigned long)phys_ram_base;
2154 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2155 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2156#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2157 tb_invalidate_phys_page_fast(ram_addr, 2);
2158 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2159#endif
3a7d929e 2160 }
c27004ec 2161 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2162#ifdef USE_KQEMU
2163 if (cpu_single_env->kqemu_enabled &&
2164 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2165 kqemu_modify_page(cpu_single_env, ram_addr);
2166#endif
f23db169
FB
2167 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2168 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2169 /* we remove the notdirty callback only if the code has been
2170 flushed */
2171 if (dirty_flags == 0xff)
6a00d601 2172 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2173}
2174
3a7d929e 2175static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2176{
3a7d929e
FB
2177 unsigned long ram_addr;
2178 int dirty_flags;
2179 ram_addr = addr - (unsigned long)phys_ram_base;
2180 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2181 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2182#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2183 tb_invalidate_phys_page_fast(ram_addr, 4);
2184 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2185#endif
3a7d929e 2186 }
c27004ec 2187 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2188#ifdef USE_KQEMU
2189 if (cpu_single_env->kqemu_enabled &&
2190 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2191 kqemu_modify_page(cpu_single_env, ram_addr);
2192#endif
f23db169
FB
2193 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2194 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2195 /* we remove the notdirty callback only if the code has been
2196 flushed */
2197 if (dirty_flags == 0xff)
6a00d601 2198 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2199}
2200
3a7d929e 2201static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2202 NULL, /* never used */
2203 NULL, /* never used */
2204 NULL, /* never used */
2205};
2206
1ccde1cb
FB
2207static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2208 notdirty_mem_writeb,
2209 notdirty_mem_writew,
2210 notdirty_mem_writel,
2211};
2212
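/* Editor's sketch (not part of the original source): the companion
   pattern used by direct RAM writers later in this file -- after writing
   through phys_ram_base, translated code covering the range must be
   invalidated and the page's dirty bits set, which is exactly what the
   notdirty_mem_write* handlers do from the TLB slow path. */
static void example_mark_dirty(unsigned long addr1, int len)
{
    if (!cpu_physical_memory_is_dirty(addr1)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + len, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
            (0xff & ~CODE_DIRTY_FLAG);
    }
}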
6658ffb8
PB
2213#if defined(CONFIG_SOFTMMU)
2214/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2215 so these check for a hit then pass through to the normal out-of-line
2216 phys routines. */
2217static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2218{
2219 return ldub_phys(addr);
2220}
2221
2222static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2223{
2224 return lduw_phys(addr);
2225}
2226
2227static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2228{
2229 return ldl_phys(addr);
2230}
2231
2232/* Generate a debug exception if a watchpoint has been hit.
 2233 Returns the real physical address of the access. addr will be a host
d79acba4 2234 address when the access falls in RAM. */
6658ffb8
PB
2235static target_ulong check_watchpoint(target_phys_addr_t addr)
2236{
2237 CPUState *env = cpu_single_env;
2238 target_ulong watch;
2239 target_ulong retaddr;
2240 int i;
2241
2242 retaddr = addr;
2243 for (i = 0; i < env->nb_watchpoints; i++) {
2244 watch = env->watchpoint[i].vaddr;
2245 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2246 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2247 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2248 cpu_single_env->watchpoint_hit = i + 1;
2249 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2250 break;
2251 }
2252 }
2253 }
2254 return retaddr;
2255}
2256
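/* Editor's sketch (not part of the original source): arming a watchpoint
   through the CPUState fields that check_watchpoint() reads.  A real
   helper would also bounds-check the watchpoint array; the page flush
   forces the next access to refill its TLB entry via io_mem_watch. */
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    int i = env->nb_watchpoints++;

    env->watchpoint[i].vaddr = vaddr;
    tlb_flush_page(env, vaddr);
}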
2257static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2258 uint32_t val)
2259{
2260 addr = check_watchpoint(addr);
2261 stb_phys(addr, val);
2262}
2263
2264static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2265 uint32_t val)
2266{
2267 addr = check_watchpoint(addr);
2268 stw_phys(addr, val);
2269}
2270
2271static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2272 uint32_t val)
2273{
2274 addr = check_watchpoint(addr);
2275 stl_phys(addr, val);
2276}
2277
2278static CPUReadMemoryFunc *watch_mem_read[3] = {
2279 watch_mem_readb,
2280 watch_mem_readw,
2281 watch_mem_readl,
2282};
2283
2284static CPUWriteMemoryFunc *watch_mem_write[3] = {
2285 watch_mem_writeb,
2286 watch_mem_writew,
2287 watch_mem_writel,
2288};
2289#endif
2290
db7b5426
BS
2291static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2292 unsigned int len)
2293{
2294 CPUReadMemoryFunc **mem_read;
2295 uint32_t ret;
2296 unsigned int idx;
2297
2298 idx = SUBPAGE_IDX(addr - mmio->base);
2299#if defined(DEBUG_SUBPAGE)
2300 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2301 mmio, len, addr, idx);
2302#endif
2303 mem_read = mmio->mem_read[idx];
2304 ret = (*mem_read[len])(mmio->opaque[idx], addr);
2305
2306 return ret;
2307}
2308
2309static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2310 uint32_t value, unsigned int len)
2311{
2312 CPUWriteMemoryFunc **mem_write;
2313 unsigned int idx;
2314
2315 idx = SUBPAGE_IDX(addr - mmio->base);
2316#if defined(DEBUG_SUBPAGE)
2317 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2318 mmio, len, addr, idx, value);
2319#endif
2320 mem_write = mmio->mem_write[idx];
2321 (*mem_write[len])(mmio->opaque[idx], addr, value);
2322}
2323
2324static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2325{
2326#if defined(DEBUG_SUBPAGE)
2327 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2328#endif
2329
2330 return subpage_readlen(opaque, addr, 0);
2331}
2332
2333static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2334 uint32_t value)
2335{
2336#if defined(DEBUG_SUBPAGE)
2337 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2338#endif
2339 subpage_writelen(opaque, addr, value, 0);
2340}
2341
2342static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2343{
2344#if defined(DEBUG_SUBPAGE)
2345 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2346#endif
2347
2348 return subpage_readlen(opaque, addr, 1);
2349}
2350
2351static void subpage_writew (void *opaque, target_phys_addr_t addr,
2352 uint32_t value)
2353{
2354#if defined(DEBUG_SUBPAGE)
2355 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2356#endif
2357 subpage_writelen(opaque, addr, value, 1);
2358}
2359
2360static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2361{
2362#if defined(DEBUG_SUBPAGE)
2363 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2364#endif
2365
2366 return subpage_readlen(opaque, addr, 2);
2367}
2368
2369static void subpage_writel (void *opaque,
2370 target_phys_addr_t addr, uint32_t value)
2371{
2372#if defined(DEBUG_SUBPAGE)
2373 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2374#endif
2375 subpage_writelen(opaque, addr, value, 2);
2376}
2377
2378static CPUReadMemoryFunc *subpage_read[] = {
2379 &subpage_readb,
2380 &subpage_readw,
2381 &subpage_readl,
2382};
2383
2384static CPUWriteMemoryFunc *subpage_write[] = {
2385 &subpage_writeb,
2386 &subpage_writew,
2387 &subpage_writel,
2388};
2389
2390static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2391 int memory)
2392{
2393 int idx, eidx;
2394
2395 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2396 return -1;
2397 idx = SUBPAGE_IDX(start);
2398 eidx = SUBPAGE_IDX(end);
2399#if defined(DEBUG_SUBPAGE)
2400 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2401 mmio, start, end, idx, eidx, memory);
2402#endif
2403 memory >>= IO_MEM_SHIFT;
2404 for (; idx <= eidx; idx++) {
2405 mmio->mem_read[idx] = io_mem_read[memory];
2406 mmio->mem_write[idx] = io_mem_write[memory];
2407 mmio->opaque[idx] = io_mem_opaque[memory];
2408 }
2409
2410 return 0;
2411}
2412
2413static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2414 int orig_memory)
2415{
2416 subpage_t *mmio;
2417 int subpage_memory;
2418
2419 mmio = qemu_mallocz(sizeof(subpage_t));
2420 if (mmio != NULL) {
2421 mmio->base = base;
2422 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2423#if defined(DEBUG_SUBPAGE)
2424 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2425 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2426#endif
2427 *phys = subpage_memory | IO_MEM_SUBPAGE;
2428 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2429 }
2430
2431 return mmio;
2432}
2433
33417e70
FB
2434static void io_mem_init(void)
2435{
3a7d929e 2436 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2437 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2438 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2439 io_mem_nb = 5;
2440
6658ffb8
PB
2441#if defined(CONFIG_SOFTMMU)
2442 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2443 watch_mem_write, NULL);
2444#endif
1ccde1cb 2445 /* alloc dirty bits array */
0a962c02 2446 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2447 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2448}
2449
 2450/* mem_read and mem_write are arrays of functions used to access a
 2451 byte (index 0), word (index 1) and dword (index 2). All functions
 2452 must be supplied. If io_index is non-zero, the corresponding I/O
 2453 zone is modified. If it is zero (or negative), a new I/O zone is
 2454 allocated. The return value can be used with
 2455 cpu_register_physical_memory(); -1 is returned on error. */
2456int cpu_register_io_memory(int io_index,
2457 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2458 CPUWriteMemoryFunc **mem_write,
2459 void *opaque)
33417e70
FB
2460{
2461 int i;
2462
2463 if (io_index <= 0) {
b5ff1b31 2464 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2465 return -1;
2466 io_index = io_mem_nb++;
2467 } else {
2468 if (io_index >= IO_MEM_NB_ENTRIES)
2469 return -1;
2470 }
b5ff1b31 2471
33417e70
FB
2472 for(i = 0;i < 3; i++) {
2473 io_mem_read[io_index][i] = mem_read[i];
2474 io_mem_write[io_index][i] = mem_write[i];
2475 }
a4193c8a 2476 io_mem_opaque[io_index] = opaque;
33417e70
FB
2477 return io_index << IO_MEM_SHIFT;
2478}
61382a50 2479
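/* Editor's sketch (not part of the original source): wiring a trivial
   MMIO device into the dispatch tables above.  The device, its register
   value and its physical base address are hypothetical. */
static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef; /* same value for byte/word/long in this toy */
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* writes are ignored in this toy device */
}

static CPUReadMemoryFunc *mydev_read_fn[3] = {
    mydev_read,
    mydev_read,
    mydev_read,
};

static CPUWriteMemoryFunc *mydev_write_fn[3] = {
    mydev_write,
    mydev_write,
    mydev_write,
};

static void example_register_mydev(void)
{
    int io = cpu_register_io_memory(0, mydev_read_fn, mydev_write_fn, NULL);
    /* map one page of the device at a hypothetical physical address */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
}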
8926b517
FB
2480CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2481{
2482 return io_mem_write[io_index >> IO_MEM_SHIFT];
2483}
2484
2485CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2486{
2487 return io_mem_read[io_index >> IO_MEM_SHIFT];
2488}
2489
13eb76e0
FB
2490/* physical memory access (slow version, mainly for debug) */
2491#if defined(CONFIG_USER_ONLY)
5fafdf24 2492void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2493 int len, int is_write)
2494{
2495 int l, flags;
2496 target_ulong page;
53a5960a 2497 void * p;
13eb76e0
FB
2498
2499 while (len > 0) {
2500 page = addr & TARGET_PAGE_MASK;
2501 l = (page + TARGET_PAGE_SIZE) - addr;
2502 if (l > len)
2503 l = len;
2504 flags = page_get_flags(page);
2505 if (!(flags & PAGE_VALID))
2506 return;
2507 if (is_write) {
2508 if (!(flags & PAGE_WRITE))
2509 return;
579a97f7
FB
2510 /* XXX: this code should not depend on lock_user */
2511 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2512 /* FIXME - should this return an error rather than just fail? */
2513 return;
53a5960a
PB
2514 memcpy(p, buf, len);
2515 unlock_user(p, addr, len);
13eb76e0
FB
2516 } else {
2517 if (!(flags & PAGE_READ))
2518 return;
579a97f7
FB
2519 /* XXX: this code should not depend on lock_user */
2520 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2521 /* FIXME - should this return an error rather than just fail? */
2522 return;
53a5960a
PB
2523 memcpy(buf, p, len);
2524 unlock_user(p, addr, 0);
13eb76e0
FB
2525 }
2526 len -= l;
2527 buf += l;
2528 addr += l;
2529 }
2530}
8df1cd07 2531
13eb76e0 2532#else
5fafdf24 2533void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2534 int len, int is_write)
2535{
2536 int l, io_index;
2537 uint8_t *ptr;
2538 uint32_t val;
2e12669a
FB
2539 target_phys_addr_t page;
2540 unsigned long pd;
92e873b9 2541 PhysPageDesc *p;
3b46e624 2542
13eb76e0
FB
2543 while (len > 0) {
2544 page = addr & TARGET_PAGE_MASK;
2545 l = (page + TARGET_PAGE_SIZE) - addr;
2546 if (l > len)
2547 l = len;
92e873b9 2548 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2549 if (!p) {
2550 pd = IO_MEM_UNASSIGNED;
2551 } else {
2552 pd = p->phys_offset;
2553 }
3b46e624 2554
13eb76e0 2555 if (is_write) {
3a7d929e 2556 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2557 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2558 /* XXX: could force cpu_single_env to NULL to avoid
2559 potential bugs */
13eb76e0 2560 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2561 /* 32 bit write access */
c27004ec 2562 val = ldl_p(buf);
a4193c8a 2563 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2564 l = 4;
2565 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2566 /* 16 bit write access */
c27004ec 2567 val = lduw_p(buf);
a4193c8a 2568 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2569 l = 2;
2570 } else {
1c213d19 2571 /* 8 bit write access */
c27004ec 2572 val = ldub_p(buf);
a4193c8a 2573 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2574 l = 1;
2575 }
2576 } else {
b448f2f3
FB
2577 unsigned long addr1;
2578 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2579 /* RAM case */
b448f2f3 2580 ptr = phys_ram_base + addr1;
13eb76e0 2581 memcpy(ptr, buf, l);
3a7d929e
FB
2582 if (!cpu_physical_memory_is_dirty(addr1)) {
2583 /* invalidate code */
2584 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2585 /* set dirty bit */
5fafdf24 2586 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2587 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2588 }
13eb76e0
FB
2589 }
2590 } else {
5fafdf24 2591 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2592 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2593 /* I/O case */
2594 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2595 if (l >= 4 && ((addr & 3) == 0)) {
2596 /* 32 bit read access */
a4193c8a 2597 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2598 stl_p(buf, val);
13eb76e0
FB
2599 l = 4;
2600 } else if (l >= 2 && ((addr & 1) == 0)) {
2601 /* 16 bit read access */
a4193c8a 2602 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2603 stw_p(buf, val);
13eb76e0
FB
2604 l = 2;
2605 } else {
1c213d19 2606 /* 8 bit read access */
a4193c8a 2607 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2608 stb_p(buf, val);
13eb76e0
FB
2609 l = 1;
2610 }
2611 } else {
2612 /* RAM case */
5fafdf24 2613 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2614 (addr & ~TARGET_PAGE_MASK);
2615 memcpy(buf, ptr, l);
2616 }
2617 }
2618 len -= l;
2619 buf += l;
2620 addr += l;
2621 }
2622}
8df1cd07 2623
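/* Editor's sketch (not part of the original source): copying a guest
   physical range into a host buffer and patching it back through the
   slow path above (is_write = 0 reads, 1 writes). */
static void example_phys_copy(target_phys_addr_t guest_addr)
{
    uint8_t buf[16];

    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 0); /* read  */
    buf[0] ^= 0xff;                                          /* patch */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 1); /* write */
}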
d0ecd2aa 2624/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2625void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2626 const uint8_t *buf, int len)
2627{
2628 int l;
2629 uint8_t *ptr;
2630 target_phys_addr_t page;
2631 unsigned long pd;
2632 PhysPageDesc *p;
3b46e624 2633
d0ecd2aa
FB
2634 while (len > 0) {
2635 page = addr & TARGET_PAGE_MASK;
2636 l = (page + TARGET_PAGE_SIZE) - addr;
2637 if (l > len)
2638 l = len;
2639 p = phys_page_find(page >> TARGET_PAGE_BITS);
2640 if (!p) {
2641 pd = IO_MEM_UNASSIGNED;
2642 } else {
2643 pd = p->phys_offset;
2644 }
3b46e624 2645
d0ecd2aa 2646 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2647 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2648 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2649 /* do nothing */
2650 } else {
2651 unsigned long addr1;
2652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2653 /* ROM/RAM case */
2654 ptr = phys_ram_base + addr1;
2655 memcpy(ptr, buf, l);
2656 }
2657 len -= l;
2658 buf += l;
2659 addr += l;
2660 }
2661}
2662
2663
8df1cd07
FB
2664/* warning: addr must be aligned */
2665uint32_t ldl_phys(target_phys_addr_t addr)
2666{
2667 int io_index;
2668 uint8_t *ptr;
2669 uint32_t val;
2670 unsigned long pd;
2671 PhysPageDesc *p;
2672
2673 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2674 if (!p) {
2675 pd = IO_MEM_UNASSIGNED;
2676 } else {
2677 pd = p->phys_offset;
2678 }
3b46e624 2679
5fafdf24 2680 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2681 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2682 /* I/O case */
2683 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2684 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2685 } else {
2686 /* RAM case */
5fafdf24 2687 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2688 (addr & ~TARGET_PAGE_MASK);
2689 val = ldl_p(ptr);
2690 }
2691 return val;
2692}
2693
84b7b8e7
FB
2694/* warning: addr must be aligned */
2695uint64_t ldq_phys(target_phys_addr_t addr)
2696{
2697 int io_index;
2698 uint8_t *ptr;
2699 uint64_t val;
2700 unsigned long pd;
2701 PhysPageDesc *p;
2702
2703 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2704 if (!p) {
2705 pd = IO_MEM_UNASSIGNED;
2706 } else {
2707 pd = p->phys_offset;
2708 }
3b46e624 2709
2a4188a3
FB
2710 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2711 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2712 /* I/O case */
2713 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2714#ifdef TARGET_WORDS_BIGENDIAN
2715 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2716 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2717#else
2718 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2719 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2720#endif
2721 } else {
2722 /* RAM case */
5fafdf24 2723 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2724 (addr & ~TARGET_PAGE_MASK);
2725 val = ldq_p(ptr);
2726 }
2727 return val;
2728}
2729
aab33094
FB
2730/* XXX: optimize */
2731uint32_t ldub_phys(target_phys_addr_t addr)
2732{
2733 uint8_t val;
2734 cpu_physical_memory_read(addr, &val, 1);
2735 return val;
2736}
2737
2738/* XXX: optimize */
2739uint32_t lduw_phys(target_phys_addr_t addr)
2740{
2741 uint16_t val;
2742 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2743 return tswap16(val);
2744}
2745
8df1cd07
FB
 2746/* warning: addr must be aligned. The RAM page is not marked as dirty
2747 and the code inside is not invalidated. It is useful if the dirty
2748 bits are used to track modified PTEs */
2749void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2750{
2751 int io_index;
2752 uint8_t *ptr;
2753 unsigned long pd;
2754 PhysPageDesc *p;
2755
2756 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2757 if (!p) {
2758 pd = IO_MEM_UNASSIGNED;
2759 } else {
2760 pd = p->phys_offset;
2761 }
3b46e624 2762
3a7d929e 2763 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2764 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2765 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2766 } else {
5fafdf24 2767 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2768 (addr & ~TARGET_PAGE_MASK);
2769 stl_p(ptr, val);
2770 }
2771}
2772
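/* Editor's sketch (not part of the original source): a target MMU helper
   setting the accessed bit of a (hypothetical) 32-bit PTE in guest RAM.
   stl_phys_notdirty() is used so that rewriting the PTE does not itself
   mark the page dirty or invalidate translated code; EX_PTE_ACCESSED is
   an invented flag layout. */
#define EX_PTE_ACCESSED 0x20

static void example_touch_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EX_PTE_ACCESSED))
        stl_phys_notdirty(pte_addr, pte | EX_PTE_ACCESSED);
}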
bc98a7ef
JM
2773void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2774{
2775 int io_index;
2776 uint8_t *ptr;
2777 unsigned long pd;
2778 PhysPageDesc *p;
2779
2780 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2781 if (!p) {
2782 pd = IO_MEM_UNASSIGNED;
2783 } else {
2784 pd = p->phys_offset;
2785 }
3b46e624 2786
bc98a7ef
JM
2787 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2788 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2789#ifdef TARGET_WORDS_BIGENDIAN
2790 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2791 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2792#else
2793 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2794 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2795#endif
2796 } else {
5fafdf24 2797 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2798 (addr & ~TARGET_PAGE_MASK);
2799 stq_p(ptr, val);
2800 }
2801}
2802
8df1cd07 2803/* warning: addr must be aligned */
8df1cd07
FB
2804void stl_phys(target_phys_addr_t addr, uint32_t val)
2805{
2806 int io_index;
2807 uint8_t *ptr;
2808 unsigned long pd;
2809 PhysPageDesc *p;
2810
2811 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2812 if (!p) {
2813 pd = IO_MEM_UNASSIGNED;
2814 } else {
2815 pd = p->phys_offset;
2816 }
3b46e624 2817
3a7d929e 2818 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2819 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2820 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2821 } else {
2822 unsigned long addr1;
2823 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2824 /* RAM case */
2825 ptr = phys_ram_base + addr1;
2826 stl_p(ptr, val);
3a7d929e
FB
2827 if (!cpu_physical_memory_is_dirty(addr1)) {
2828 /* invalidate code */
2829 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2830 /* set dirty bit */
f23db169
FB
2831 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2832 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2833 }
8df1cd07
FB
2834 }
2835}
2836
aab33094
FB
2837/* XXX: optimize */
2838void stb_phys(target_phys_addr_t addr, uint32_t val)
2839{
2840 uint8_t v = val;
2841 cpu_physical_memory_write(addr, &v, 1);
2842}
2843
2844/* XXX: optimize */
2845void stw_phys(target_phys_addr_t addr, uint32_t val)
2846{
2847 uint16_t v = tswap16(val);
2848 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2849}
2850
2851/* XXX: optimize */
2852void stq_phys(target_phys_addr_t addr, uint64_t val)
2853{
2854 val = tswap64(val);
2855 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2856}
2857
13eb76e0
FB
2858#endif
2859
2860/* virtual memory access for debug */
5fafdf24 2861int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2862 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2863{
2864 int l;
9b3c35e0
JM
2865 target_phys_addr_t phys_addr;
2866 target_ulong page;
13eb76e0
FB
2867
2868 while (len > 0) {
2869 page = addr & TARGET_PAGE_MASK;
2870 phys_addr = cpu_get_phys_page_debug(env, page);
2871 /* if no physical page mapped, return an error */
2872 if (phys_addr == -1)
2873 return -1;
2874 l = (page + TARGET_PAGE_SIZE) - addr;
2875 if (l > len)
2876 l = len;
5fafdf24 2877 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2878 buf, l, is_write);
13eb76e0
FB
2879 len -= l;
2880 buf += l;
2881 addr += l;
2882 }
2883 return 0;
2884}
2885
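/* Editor's sketch (not part of the original source): how a debugger stub
   might read guest virtual memory through the per-page translate-and-copy
   loop above. */
static int example_read_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* -1 if any page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}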
e3db7226
FB
2886void dump_exec_info(FILE *f,
2887 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2888{
2889 int i, target_code_size, max_target_code_size;
2890 int direct_jmp_count, direct_jmp2_count, cross_page;
2891 TranslationBlock *tb;
3b46e624 2892
e3db7226
FB
2893 target_code_size = 0;
2894 max_target_code_size = 0;
2895 cross_page = 0;
2896 direct_jmp_count = 0;
2897 direct_jmp2_count = 0;
2898 for(i = 0; i < nb_tbs; i++) {
2899 tb = &tbs[i];
2900 target_code_size += tb->size;
2901 if (tb->size > max_target_code_size)
2902 max_target_code_size = tb->size;
2903 if (tb->page_addr[1] != -1)
2904 cross_page++;
2905 if (tb->tb_next_offset[0] != 0xffff) {
2906 direct_jmp_count++;
2907 if (tb->tb_next_offset[1] != 0xffff) {
2908 direct_jmp2_count++;
2909 }
2910 }
2911 }
 2912 /* XXX: avoid using doubles? */
2913 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2914 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2915 nb_tbs ? target_code_size / nb_tbs : 0,
2916 max_target_code_size);
5fafdf24 2917 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2918 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2919 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2920 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2921 cross_page,
e3db7226
FB
2922 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2923 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2924 direct_jmp_count,
e3db7226
FB
2925 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2926 direct_jmp2_count,
2927 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2928 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2929 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2930 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2931}
2932
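/* Editor's sketch (not part of the original source): dumping the
   translation statistics to stderr; fprintf matches the cpu_fprintf
   callback type. */
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}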
5fafdf24 2933#if !defined(CONFIG_USER_ONLY)
61382a50
FB
2934
2935#define MMUSUFFIX _cmmu
2936#define GETPC() NULL
2937#define env cpu_single_env
b769d8fe 2938#define SOFTMMU_CODE_ACCESS
61382a50
FB
2939
2940#define SHIFT 0
2941#include "softmmu_template.h"
2942
2943#define SHIFT 1
2944#include "softmmu_template.h"
2945
2946#define SHIFT 2
2947#include "softmmu_template.h"
2948
2949#define SHIFT 3
2950#include "softmmu_template.h"
2951
2952#undef env
2953
2954#endif