54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
40#endif
54936004 41
fd6ce8f6 42//#define DEBUG_TB_INVALIDATE
66e85a21 43//#define DEBUG_FLUSH
9fa3e853 44//#define DEBUG_TLB
67d3b957 45//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
46
47/* make various TB consistency checks */
5fafdf24
TS
48//#define DEBUG_TB_CHECK
49//#define DEBUG_TLB_CHECK
fd6ce8f6 50
1196be37 51//#define DEBUG_IOPORT
db7b5426 52//#define DEBUG_SUBPAGE
1196be37 53
99773bd4
PB
54#if !defined(CONFIG_USER_ONLY)
55/* TB consistency checks only implemented for usermode emulation. */
56#undef DEBUG_TB_CHECK
57#endif
58
fd6ce8f6 59/* threshold to flush the translated code buffer */
d07bde88 60#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
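/* Keeping code_gen_max_block_size() of headroom means a translation can
   start whenever code_gen_ptr is below CODE_GEN_BUFFER_MAX_SIZE and still
   be guaranteed to fit; tb_alloc() checks this limit and returns NULL so
   that callers trigger tb_flush() instead of overflowing the buffer. */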
fd6ce8f6 61
9fa3e853
FB
62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
108c49b8
FB
67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
fd6ce8f6 81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 83int nb_tbs;
eb51d102
FB
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 86
b8076a74 87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
88uint8_t *code_gen_ptr;
89
9fa3e853
FB
90int phys_ram_size;
91int phys_ram_fd;
92uint8_t *phys_ram_base;
1ccde1cb 93uint8_t *phys_ram_dirty;
e9a1ab19 94static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 95
6a00d601
FB
96CPUState *first_cpu;
97/* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
5fafdf24 99CPUState *cpu_single_env;
6a00d601 100
54936004 101typedef struct PageDesc {
92e873b9 102 /* list of TBs intersecting this ram page */
fd6ce8f6 103 TranslationBlock *first_tb;
9fa3e853
FB
104 /* in order to optimize self modifying code, we count the number
105 of lookups we do to a given page to use a bitmap */
106 unsigned int code_write_count;
107 uint8_t *code_bitmap;
108#if defined(CONFIG_USER_ONLY)
109 unsigned long flags;
110#endif
54936004
FB
111} PageDesc;
112
92e873b9
FB
113typedef struct PhysPageDesc {
114 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 115 uint32_t phys_offset;
92e873b9
FB
116} PhysPageDesc;
117
54936004 118#define L2_BITS 10
bedb69ea
JM
119#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120/* XXX: this is a temporary hack for alpha target.
121 * In the future, this is to be replaced by a multi-level table
122 * to actually be able to handle the complete 64 bits address space.
123 */
124#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125#else
54936004 126#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 127#endif
54936004
FB
128
129#define L1_SIZE (1 << L1_BITS)
130#define L2_SIZE (1 << L2_BITS)
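/* Page descriptors are kept in a two-level table: the top L1_BITS of a
   page index select an entry of l1_map[], which points to an array of
   L2_SIZE descriptors indexed by the low L2_BITS.  Roughly:

       PageDesc *p = l1_map[index >> L2_BITS];             (first level)
       return p ? p + (index & (L2_SIZE - 1)) : NULL;      (second level)

   phys_page_find_alloc() below uses the same layout, with one extra level
   when TARGET_PHYS_ADDR_SPACE_BITS is larger than 32. */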
131
33417e70 132static void io_mem_init(void);
fd6ce8f6 133
83fb7adf
FB
134unsigned long qemu_real_host_page_size;
135unsigned long qemu_host_page_bits;
136unsigned long qemu_host_page_size;
137unsigned long qemu_host_page_mask;
54936004 138
92e873b9 139/* XXX: for system emulation, it could just be an array */
54936004 140static PageDesc *l1_map[L1_SIZE];
0a962c02 141PhysPageDesc **l1_phys_map;
54936004 142
33417e70 143/* io memory support */
33417e70
FB
144CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 146void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 147static int io_mem_nb;
6658ffb8
PB
148#if defined(CONFIG_SOFTMMU)
149static int io_mem_watch;
150#endif
33417e70 151
34865134
FB
152/* log support */
153char *logfilename = "/tmp/qemu.log";
154FILE *logfile;
155int loglevel;
e735b91c 156static int log_append = 0;
34865134 157
e3db7226
FB
158/* statistics */
159static int tlb_flush_count;
160static int tb_flush_count;
161static int tb_phys_invalidate_count;
162
db7b5426
BS
163#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164typedef struct subpage_t {
165 target_phys_addr_t base;
3ee89922
BS
166 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
169} subpage_t;
170
b346ff46 171static void page_init(void)
54936004 172{
83fb7adf 173 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 174 TARGET_PAGE_SIZE */
67b915a5 175#ifdef _WIN32
d5a8f07c
FB
176 {
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
3b46e624 179
d5a8f07c
FB
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
3b46e624 182
d5a8f07c
FB
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
185 }
67b915a5 186#else
83fb7adf 187 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
188 {
189 unsigned long start, end;
190
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
3b46e624 193
d5a8f07c
FB
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
3b46e624 197
5fafdf24 198 mprotect((void *)start, end - start,
d5a8f07c
FB
199 PROT_READ | PROT_WRITE | PROT_EXEC);
200 }
67b915a5 201#endif
d5a8f07c 202
83fb7adf
FB
203 if (qemu_host_page_size == 0)
204 qemu_host_page_size = qemu_real_host_page_size;
205 if (qemu_host_page_size < TARGET_PAGE_SIZE)
206 qemu_host_page_size = TARGET_PAGE_SIZE;
207 qemu_host_page_bits = 0;
208 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209 qemu_host_page_bits++;
210 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
211 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
213
214#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
215 {
216 long long startaddr, endaddr;
217 FILE *f;
218 int n;
219
220 f = fopen("/proc/self/maps", "r");
221 if (f) {
222 do {
223 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224 if (n == 2) {
225 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226 TARGET_PAGE_ALIGN(endaddr),
227 PAGE_RESERVED);
228 }
229 } while (!feof(f));
230 fclose(f);
231 }
232 }
233#endif
54936004
FB
234}
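/* Note: the /proc/self/maps scan above marks every range the host has
   already mapped as PAGE_RESERVED, apparently so that user-mode emulation
   does not hand those host addresses out to the guest. */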
235
fd6ce8f6 236static inline PageDesc *page_find_alloc(unsigned int index)
54936004 237{
54936004
FB
238 PageDesc **lp, *p;
239
54936004
FB
240 lp = &l1_map[index >> L2_BITS];
241 p = *lp;
242 if (!p) {
243 /* allocate if not found */
59817ccb 244 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 245 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
246 *lp = p;
247 }
248 return p + (index & (L2_SIZE - 1));
249}
250
fd6ce8f6 251static inline PageDesc *page_find(unsigned int index)
54936004 252{
54936004
FB
253 PageDesc *p;
254
54936004
FB
255 p = l1_map[index >> L2_BITS];
256 if (!p)
257 return 0;
fd6ce8f6
FB
258 return p + (index & (L2_SIZE - 1));
259}
260
108c49b8 261static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 262{
108c49b8 263 void **lp, **p;
e3f4e2a4 264 PhysPageDesc *pd;
92e873b9 265
108c49b8
FB
266 p = (void **)l1_phys_map;
267#if TARGET_PHYS_ADDR_SPACE_BITS > 32
268
269#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271#endif
272 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
108c49b8
FB
276 if (!alloc)
277 return NULL;
278 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279 memset(p, 0, sizeof(void *) * L1_SIZE);
280 *lp = p;
281 }
282#endif
283 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
284 pd = *lp;
285 if (!pd) {
286 int i;
108c49b8
FB
287 /* allocate if not found */
288 if (!alloc)
289 return NULL;
e3f4e2a4
PB
290 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291 *lp = pd;
292 for (i = 0; i < L2_SIZE; i++)
293 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 294 }
e3f4e2a4 295 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
296}
297
108c49b8 298static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 299{
108c49b8 300 return phys_page_find_alloc(index, 0);
92e873b9
FB
301}
302
9fa3e853 303#if !defined(CONFIG_USER_ONLY)
6a00d601 304static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 305static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 306 target_ulong vaddr);
9fa3e853 307#endif
fd6ce8f6 308
6a00d601 309void cpu_exec_init(CPUState *env)
fd6ce8f6 310{
6a00d601
FB
311 CPUState **penv;
312 int cpu_index;
313
fd6ce8f6 314 if (!code_gen_ptr) {
57fec1fe 315 cpu_gen_init();
fd6ce8f6 316 code_gen_ptr = code_gen_buffer;
b346ff46 317 page_init();
33417e70 318 io_mem_init();
fd6ce8f6 319 }
6a00d601
FB
320 env->next_cpu = NULL;
321 penv = &first_cpu;
322 cpu_index = 0;
323 while (*penv != NULL) {
324 penv = (CPUState **)&(*penv)->next_cpu;
325 cpu_index++;
326 }
327 env->cpu_index = cpu_index;
6658ffb8 328 env->nb_watchpoints = 0;
6a00d601 329 *penv = env;
fd6ce8f6
FB
330}
331
9fa3e853
FB
332static inline void invalidate_page_bitmap(PageDesc *p)
333{
334 if (p->code_bitmap) {
59817ccb 335 qemu_free(p->code_bitmap);
9fa3e853
FB
336 p->code_bitmap = NULL;
337 }
338 p->code_write_count = 0;
339}
340
fd6ce8f6
FB
341/* set to NULL all the 'first_tb' fields in all PageDescs */
342static void page_flush_tb(void)
343{
344 int i, j;
345 PageDesc *p;
346
347 for(i = 0; i < L1_SIZE; i++) {
348 p = l1_map[i];
349 if (p) {
9fa3e853
FB
350 for(j = 0; j < L2_SIZE; j++) {
351 p->first_tb = NULL;
352 invalidate_page_bitmap(p);
353 p++;
354 }
fd6ce8f6
FB
355 }
356 }
357}
358
359/* flush all the translation blocks */
d4e8164f 360/* XXX: tb_flush is currently not thread safe */
6a00d601 361void tb_flush(CPUState *env1)
fd6ce8f6 362{
6a00d601 363 CPUState *env;
0124311e 364#if defined(DEBUG_FLUSH)
ab3d1727
BS
365 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
366 (unsigned long)(code_gen_ptr - code_gen_buffer),
367 nb_tbs, nb_tbs > 0 ?
368 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 369#endif
a208e54a
PB
370 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
371 cpu_abort(env1, "Internal error: code buffer overflow\n");
372
fd6ce8f6 373 nb_tbs = 0;
3b46e624 374
6a00d601
FB
375 for(env = first_cpu; env != NULL; env = env->next_cpu) {
376 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
377 }
9fa3e853 378
8a8a608f 379 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 380 page_flush_tb();
9fa3e853 381
fd6ce8f6 382 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
383 /* XXX: flush processor icache at this point if cache flush is
384 expensive */
e3db7226 385 tb_flush_count++;
fd6ce8f6
FB
386}
387
388#ifdef DEBUG_TB_CHECK
389
bc98a7ef 390static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
391{
392 TranslationBlock *tb;
393 int i;
394 address &= TARGET_PAGE_MASK;
99773bd4
PB
395 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
396 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
397 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
398 address >= tb->pc + tb->size)) {
399 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 400 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
401 }
402 }
403 }
404}
405
406/* verify that all the pages have correct rights for code */
407static void tb_page_check(void)
408{
409 TranslationBlock *tb;
410 int i, flags1, flags2;
3b46e624 411
99773bd4
PB
412 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
413 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
414 flags1 = page_get_flags(tb->pc);
415 flags2 = page_get_flags(tb->pc + tb->size - 1);
416 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
417 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 418 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
419 }
420 }
421 }
422}
423
d4e8164f
FB
424void tb_jmp_check(TranslationBlock *tb)
425{
426 TranslationBlock *tb1;
427 unsigned int n1;
428
429 /* suppress any remaining jumps to this TB */
430 tb1 = tb->jmp_first;
431 for(;;) {
432 n1 = (long)tb1 & 3;
433 tb1 = (TranslationBlock *)((long)tb1 & ~3);
434 if (n1 == 2)
435 break;
436 tb1 = tb1->jmp_next[n1];
437 }
438 /* check end of list */
439 if (tb1 != tb) {
440 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
441 }
442}
443
fd6ce8f6
FB
444#endif
445
446/* invalidate one TB */
447static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
448 int next_offset)
449{
450 TranslationBlock *tb1;
451 for(;;) {
452 tb1 = *ptb;
453 if (tb1 == tb) {
454 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
455 break;
456 }
457 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
458 }
459}
460
9fa3e853
FB
461static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
462{
463 TranslationBlock *tb1;
464 unsigned int n1;
465
466 for(;;) {
467 tb1 = *ptb;
468 n1 = (long)tb1 & 3;
469 tb1 = (TranslationBlock *)((long)tb1 & ~3);
470 if (tb1 == tb) {
471 *ptb = tb1->page_next[n1];
472 break;
473 }
474 ptb = &tb1->page_next[n1];
475 }
476}
477
d4e8164f
FB
478static inline void tb_jmp_remove(TranslationBlock *tb, int n)
479{
480 TranslationBlock *tb1, **ptb;
481 unsigned int n1;
482
483 ptb = &tb->jmp_next[n];
484 tb1 = *ptb;
485 if (tb1) {
486 /* find tb(n) in circular list */
487 for(;;) {
488 tb1 = *ptb;
489 n1 = (long)tb1 & 3;
490 tb1 = (TranslationBlock *)((long)tb1 & ~3);
491 if (n1 == n && tb1 == tb)
492 break;
493 if (n1 == 2) {
494 ptb = &tb1->jmp_first;
495 } else {
496 ptb = &tb1->jmp_next[n1];
497 }
498 }
499 /* now we can suppress tb(n) from the list */
500 *ptb = tb->jmp_next[n];
501
502 tb->jmp_next[n] = NULL;
503 }
504}
505
506/* reset the jump entry 'n' of a TB so that it is not chained to
507 another TB */
508static inline void tb_reset_jump(TranslationBlock *tb, int n)
509{
510 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
511}
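/* The TB lists walked by the helpers above keep a small tag in the two low
   bits of each pointer: for page_next[] it is the index (0 or 1) of the page
   the link belongs to; for jmp_first/jmp_next[] it is the jump slot of the
   pointing TB, with the value 2 marking the owning TB at the head of the
   circular list.  A link is decoded as:

       n  = (long)tb & 3;                            tag
       tb = (TranslationBlock *)((long)tb & ~3);     real pointer
*/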
512
8a40a180 513static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 514{
6a00d601 515 CPUState *env;
8a40a180 516 PageDesc *p;
d4e8164f 517 unsigned int h, n1;
8a40a180
FB
518 target_ulong phys_pc;
519 TranslationBlock *tb1, *tb2;
3b46e624 520
8a40a180
FB
521 /* remove the TB from the hash list */
522 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
523 h = tb_phys_hash_func(phys_pc);
5fafdf24 524 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
525 offsetof(TranslationBlock, phys_hash_next));
526
527 /* remove the TB from the page list */
528 if (tb->page_addr[0] != page_addr) {
529 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
530 tb_page_remove(&p->first_tb, tb);
531 invalidate_page_bitmap(p);
532 }
533 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
534 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
535 tb_page_remove(&p->first_tb, tb);
536 invalidate_page_bitmap(p);
537 }
538
36bdbe54 539 tb_invalidated_flag = 1;
59817ccb 540
fd6ce8f6 541 /* remove the TB from the hash list */
8a40a180 542 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
543 for(env = first_cpu; env != NULL; env = env->next_cpu) {
544 if (env->tb_jmp_cache[h] == tb)
545 env->tb_jmp_cache[h] = NULL;
546 }
d4e8164f
FB
547
548 /* suppress this TB from the two jump lists */
549 tb_jmp_remove(tb, 0);
550 tb_jmp_remove(tb, 1);
551
552 /* suppress any remaining jumps to this TB */
553 tb1 = tb->jmp_first;
554 for(;;) {
555 n1 = (long)tb1 & 3;
556 if (n1 == 2)
557 break;
558 tb1 = (TranslationBlock *)((long)tb1 & ~3);
559 tb2 = tb1->jmp_next[n1];
560 tb_reset_jump(tb1, n1);
561 tb1->jmp_next[n1] = NULL;
562 tb1 = tb2;
563 }
564 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 565
e3db7226 566 tb_phys_invalidate_count++;
9fa3e853
FB
567}
568
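/* set_bits() marks bits [start, start + len) in a byte array where bit k
   lives in byte k >> 3 at position k & 7.  For example, set_bits(tab, 3, 10)
   sets bits 3..7 of tab[0] and bits 0..4 of tab[1]. */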
569static inline void set_bits(uint8_t *tab, int start, int len)
570{
571 int end, mask, end1;
572
573 end = start + len;
574 tab += start >> 3;
575 mask = 0xff << (start & 7);
576 if ((start & ~7) == (end & ~7)) {
577 if (start < end) {
578 mask &= ~(0xff << (end & 7));
579 *tab |= mask;
580 }
581 } else {
582 *tab++ |= mask;
583 start = (start + 8) & ~7;
584 end1 = end & ~7;
585 while (start < end1) {
586 *tab++ = 0xff;
587 start += 8;
588 }
589 if (start < end) {
590 mask = ~(0xff << (end & 7));
591 *tab |= mask;
592 }
593 }
594}
595
596static void build_page_bitmap(PageDesc *p)
597{
598 int n, tb_start, tb_end;
599 TranslationBlock *tb;
3b46e624 600
59817ccb 601 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
602 if (!p->code_bitmap)
603 return;
604 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
605
606 tb = p->first_tb;
607 while (tb != NULL) {
608 n = (long)tb & 3;
609 tb = (TranslationBlock *)((long)tb & ~3);
610 /* NOTE: this is subtle as a TB may span two physical pages */
611 if (n == 0) {
612 /* NOTE: tb_end may be after the end of the page, but
613 it is not a problem */
614 tb_start = tb->pc & ~TARGET_PAGE_MASK;
615 tb_end = tb_start + tb->size;
616 if (tb_end > TARGET_PAGE_SIZE)
617 tb_end = TARGET_PAGE_SIZE;
618 } else {
619 tb_start = 0;
620 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
621 }
622 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
623 tb = tb->page_next[n];
624 }
625}
626
d720b93d
FB
627#ifdef TARGET_HAS_PRECISE_SMC
628
5fafdf24 629static void tb_gen_code(CPUState *env,
d720b93d
FB
630 target_ulong pc, target_ulong cs_base, int flags,
631 int cflags)
632{
633 TranslationBlock *tb;
634 uint8_t *tc_ptr;
635 target_ulong phys_pc, phys_page2, virt_page2;
636 int code_gen_size;
637
c27004ec
FB
638 phys_pc = get_phys_addr_code(env, pc);
639 tb = tb_alloc(pc);
d720b93d
FB
640 if (!tb) {
641 /* flush must be done */
642 tb_flush(env);
643 /* cannot fail at this point */
c27004ec 644 tb = tb_alloc(pc);
d720b93d
FB
645 }
646 tc_ptr = code_gen_ptr;
647 tb->tc_ptr = tc_ptr;
648 tb->cs_base = cs_base;
649 tb->flags = flags;
650 tb->cflags = cflags;
d07bde88 651 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 652 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 653
d720b93d 654 /* check next page if needed */
c27004ec 655 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 656 phys_page2 = -1;
c27004ec 657 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
658 phys_page2 = get_phys_addr_code(env, virt_page2);
659 }
660 tb_link_phys(tb, phys_pc, phys_page2);
661}
662#endif
3b46e624 663
9fa3e853
FB
664/* invalidate all TBs which intersect with the target physical page
665 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
666 the same physical page. 'is_cpu_write_access' should be true if called
667 from a real cpu write access: the virtual CPU will exit the current
668 TB if code is modified inside this TB. */
5fafdf24 669void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
d720b93d
FB
670 int is_cpu_write_access)
671{
672 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 673 CPUState *env = cpu_single_env;
9fa3e853 674 PageDesc *p;
ea1c1802 675 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 676 target_ulong tb_start, tb_end;
d720b93d 677 target_ulong current_pc, current_cs_base;
9fa3e853
FB
678
679 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 680 if (!p)
9fa3e853 681 return;
5fafdf24 682 if (!p->code_bitmap &&
d720b93d
FB
683 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
684 is_cpu_write_access) {
9fa3e853
FB
685 /* build code bitmap */
686 build_page_bitmap(p);
687 }
688
689 /* we remove all the TBs in the range [start, end[ */
690 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
691 current_tb_not_found = is_cpu_write_access;
692 current_tb_modified = 0;
693 current_tb = NULL; /* avoid warning */
694 current_pc = 0; /* avoid warning */
695 current_cs_base = 0; /* avoid warning */
696 current_flags = 0; /* avoid warning */
9fa3e853
FB
697 tb = p->first_tb;
698 while (tb != NULL) {
699 n = (long)tb & 3;
700 tb = (TranslationBlock *)((long)tb & ~3);
701 tb_next = tb->page_next[n];
702 /* NOTE: this is subtle as a TB may span two physical pages */
703 if (n == 0) {
704 /* NOTE: tb_end may be after the end of the page, but
705 it is not a problem */
706 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
707 tb_end = tb_start + tb->size;
708 } else {
709 tb_start = tb->page_addr[1];
710 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
711 }
712 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
713#ifdef TARGET_HAS_PRECISE_SMC
714 if (current_tb_not_found) {
715 current_tb_not_found = 0;
716 current_tb = NULL;
717 if (env->mem_write_pc) {
718 /* now we have a real cpu fault */
719 current_tb = tb_find_pc(env->mem_write_pc);
720 }
721 }
722 if (current_tb == tb &&
723 !(current_tb->cflags & CF_SINGLE_INSN)) {
724 /* If we are modifying the current TB, we must stop
725 its execution. We could be more precise by checking
726 that the modification is after the current PC, but it
727 would require a specialized function to partially
728 restore the CPU state */
3b46e624 729
d720b93d 730 current_tb_modified = 1;
5fafdf24 731 cpu_restore_state(current_tb, env,
d720b93d
FB
732 env->mem_write_pc, NULL);
733#if defined(TARGET_I386)
734 current_flags = env->hflags;
735 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
736 current_cs_base = (target_ulong)env->segs[R_CS].base;
737 current_pc = current_cs_base + env->eip;
738#else
739#error unsupported CPU
740#endif
741 }
742#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
743 /* we need to do that to handle the case where a signal
744 occurs while doing tb_phys_invalidate() */
745 saved_tb = NULL;
746 if (env) {
747 saved_tb = env->current_tb;
748 env->current_tb = NULL;
749 }
9fa3e853 750 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
751 if (env) {
752 env->current_tb = saved_tb;
753 if (env->interrupt_request && env->current_tb)
754 cpu_interrupt(env, env->interrupt_request);
755 }
9fa3e853
FB
756 }
757 tb = tb_next;
758 }
759#if !defined(CONFIG_USER_ONLY)
760 /* if no code remaining, no need to continue to use slow writes */
761 if (!p->first_tb) {
762 invalidate_page_bitmap(p);
d720b93d
FB
763 if (is_cpu_write_access) {
764 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
765 }
766 }
767#endif
768#ifdef TARGET_HAS_PRECISE_SMC
769 if (current_tb_modified) {
770 /* we generate a block containing just the instruction
771 modifying the memory. It will ensure that it cannot modify
772 itself */
ea1c1802 773 env->current_tb = NULL;
5fafdf24 774 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
775 CF_SINGLE_INSN);
776 cpu_resume_from_signal(env, NULL);
9fa3e853 777 }
fd6ce8f6 778#endif
9fa3e853 779}
fd6ce8f6 780
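/* Fast path for small writes: once a page has taken enough write faults to
   get a code_bitmap (see SMC_BITMAP_USE_THRESHOLD), a write that does not
   overlap any translated code can return immediately; only writes hitting
   set bits fall through to tb_invalidate_phys_page_range(). */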
9fa3e853 781/* len must be <= 8 and start must be a multiple of len */
d720b93d 782static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
783{
784 PageDesc *p;
785 int offset, b;
59817ccb 786#if 0
a4193c8a
FB
787 if (1) {
788 if (loglevel) {
5fafdf24
TS
789 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
790 cpu_single_env->mem_write_vaddr, len,
791 cpu_single_env->eip,
a4193c8a
FB
792 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
793 }
59817ccb
FB
794 }
795#endif
9fa3e853 796 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 797 if (!p)
9fa3e853
FB
798 return;
799 if (p->code_bitmap) {
800 offset = start & ~TARGET_PAGE_MASK;
801 b = p->code_bitmap[offset >> 3] >> (offset & 7);
802 if (b & ((1 << len) - 1))
803 goto do_invalidate;
804 } else {
805 do_invalidate:
d720b93d 806 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
807 }
808}
809
9fa3e853 810#if !defined(CONFIG_SOFTMMU)
5fafdf24 811static void tb_invalidate_phys_page(target_ulong addr,
d720b93d 812 unsigned long pc, void *puc)
9fa3e853 813{
d720b93d
FB
814 int n, current_flags, current_tb_modified;
815 target_ulong current_pc, current_cs_base;
9fa3e853 816 PageDesc *p;
d720b93d
FB
817 TranslationBlock *tb, *current_tb;
818#ifdef TARGET_HAS_PRECISE_SMC
819 CPUState *env = cpu_single_env;
820#endif
9fa3e853
FB
821
822 addr &= TARGET_PAGE_MASK;
823 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 824 if (!p)
9fa3e853
FB
825 return;
826 tb = p->first_tb;
d720b93d
FB
827 current_tb_modified = 0;
828 current_tb = NULL;
829 current_pc = 0; /* avoid warning */
830 current_cs_base = 0; /* avoid warning */
831 current_flags = 0; /* avoid warning */
832#ifdef TARGET_HAS_PRECISE_SMC
833 if (tb && pc != 0) {
834 current_tb = tb_find_pc(pc);
835 }
836#endif
9fa3e853
FB
837 while (tb != NULL) {
838 n = (long)tb & 3;
839 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
840#ifdef TARGET_HAS_PRECISE_SMC
841 if (current_tb == tb &&
842 !(current_tb->cflags & CF_SINGLE_INSN)) {
843 /* If we are modifying the current TB, we must stop
844 its execution. We could be more precise by checking
845 that the modification is after the current PC, but it
846 would require a specialized function to partially
847 restore the CPU state */
3b46e624 848
d720b93d
FB
849 current_tb_modified = 1;
850 cpu_restore_state(current_tb, env, pc, puc);
851#if defined(TARGET_I386)
852 current_flags = env->hflags;
853 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
854 current_cs_base = (target_ulong)env->segs[R_CS].base;
855 current_pc = current_cs_base + env->eip;
856#else
857#error unsupported CPU
858#endif
859 }
860#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
861 tb_phys_invalidate(tb, addr);
862 tb = tb->page_next[n];
863 }
fd6ce8f6 864 p->first_tb = NULL;
d720b93d
FB
865#ifdef TARGET_HAS_PRECISE_SMC
866 if (current_tb_modified) {
867 /* we generate a block containing just the instruction
868 modifying the memory. It will ensure that it cannot modify
869 itself */
ea1c1802 870 env->current_tb = NULL;
5fafdf24 871 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
872 CF_SINGLE_INSN);
873 cpu_resume_from_signal(env, puc);
874 }
875#endif
fd6ce8f6 876}
9fa3e853 877#endif
fd6ce8f6
FB
878
879/* add the tb in the target page and protect it if necessary */
5fafdf24 880static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 881 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
882{
883 PageDesc *p;
9fa3e853
FB
884 TranslationBlock *last_first_tb;
885
886 tb->page_addr[n] = page_addr;
3a7d929e 887 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
888 tb->page_next[n] = p->first_tb;
889 last_first_tb = p->first_tb;
890 p->first_tb = (TranslationBlock *)((long)tb | n);
891 invalidate_page_bitmap(p);
fd6ce8f6 892
107db443 893#if defined(TARGET_HAS_SMC) || 1
d720b93d 894
9fa3e853 895#if defined(CONFIG_USER_ONLY)
fd6ce8f6 896 if (p->flags & PAGE_WRITE) {
53a5960a
PB
897 target_ulong addr;
898 PageDesc *p2;
9fa3e853
FB
899 int prot;
900
fd6ce8f6
FB
901 /* force the host page as non writable (writes will have a
902 page fault + mprotect overhead) */
53a5960a 903 page_addr &= qemu_host_page_mask;
fd6ce8f6 904 prot = 0;
53a5960a
PB
905 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
906 addr += TARGET_PAGE_SIZE) {
907
908 p2 = page_find (addr >> TARGET_PAGE_BITS);
909 if (!p2)
910 continue;
911 prot |= p2->flags;
912 p2->flags &= ~PAGE_WRITE;
913 page_get_flags(addr);
914 }
5fafdf24 915 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
916 (prot & PAGE_BITS) & ~PAGE_WRITE);
917#ifdef DEBUG_TB_INVALIDATE
ab3d1727 918 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 919 page_addr);
fd6ce8f6 920#endif
fd6ce8f6 921 }
9fa3e853
FB
922#else
923 /* if some code is already present, then the pages are already
924 protected. So we handle the case where only the first TB is
925 allocated in a physical page */
926 if (!last_first_tb) {
6a00d601 927 tlb_protect_code(page_addr);
9fa3e853
FB
928 }
929#endif
d720b93d
FB
930
931#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
932}
933
934/* Allocate a new translation block. Flush the translation buffer if
935 too many translation blocks or too much generated code. */
c27004ec 936TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
937{
938 TranslationBlock *tb;
fd6ce8f6 939
5fafdf24 940 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
fd6ce8f6 941 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 942 return NULL;
fd6ce8f6
FB
943 tb = &tbs[nb_tbs++];
944 tb->pc = pc;
b448f2f3 945 tb->cflags = 0;
d4e8164f
FB
946 return tb;
947}
948
9fa3e853
FB
949/* add a new TB and link it to the physical page tables. phys_page2 is
950 (-1) to indicate that only one page contains the TB. */
5fafdf24 951void tb_link_phys(TranslationBlock *tb,
9fa3e853 952 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 953{
9fa3e853
FB
954 unsigned int h;
955 TranslationBlock **ptb;
956
957 /* add in the physical hash table */
958 h = tb_phys_hash_func(phys_pc);
959 ptb = &tb_phys_hash[h];
960 tb->phys_hash_next = *ptb;
961 *ptb = tb;
fd6ce8f6
FB
962
963 /* add in the page list */
9fa3e853
FB
964 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
965 if (phys_page2 != -1)
966 tb_alloc_page(tb, 1, phys_page2);
967 else
968 tb->page_addr[1] = -1;
9fa3e853 969
d4e8164f
FB
970 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
971 tb->jmp_next[0] = NULL;
972 tb->jmp_next[1] = NULL;
973
974 /* init original jump addresses */
975 if (tb->tb_next_offset[0] != 0xffff)
976 tb_reset_jump(tb, 0);
977 if (tb->tb_next_offset[1] != 0xffff)
978 tb_reset_jump(tb, 1);
8a40a180
FB
979
980#ifdef DEBUG_TB_CHECK
981 tb_page_check();
982#endif
fd6ce8f6
FB
983}
984
9fa3e853
FB
985/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
986 tb[1].tc_ptr. Return NULL if not found */
987TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 988{
9fa3e853
FB
989 int m_min, m_max, m;
990 unsigned long v;
991 TranslationBlock *tb;
a513fe19
FB
992
993 if (nb_tbs <= 0)
994 return NULL;
995 if (tc_ptr < (unsigned long)code_gen_buffer ||
996 tc_ptr >= (unsigned long)code_gen_ptr)
997 return NULL;
998 /* binary search (cf Knuth) */
999 m_min = 0;
1000 m_max = nb_tbs - 1;
1001 while (m_min <= m_max) {
1002 m = (m_min + m_max) >> 1;
1003 tb = &tbs[m];
1004 v = (unsigned long)tb->tc_ptr;
1005 if (v == tc_ptr)
1006 return tb;
1007 else if (tc_ptr < v) {
1008 m_max = m - 1;
1009 } else {
1010 m_min = m + 1;
1011 }
5fafdf24 1012 }
a513fe19
FB
1013 return &tbs[m_max];
1014}
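/* Note: tb_find_pc() can use a binary search because tb_alloc() hands out
   tbs[] entries in order while code_gen_ptr only grows, so the array stays
   sorted by tc_ptr until the next tb_flush().  It is used, for instance, to
   map a host PC taken from a write fault back to the TB that is then passed
   to cpu_restore_state(). */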
7501267e 1015
ea041c0e
FB
1016static void tb_reset_jump_recursive(TranslationBlock *tb);
1017
1018static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1019{
1020 TranslationBlock *tb1, *tb_next, **ptb;
1021 unsigned int n1;
1022
1023 tb1 = tb->jmp_next[n];
1024 if (tb1 != NULL) {
1025 /* find head of list */
1026 for(;;) {
1027 n1 = (long)tb1 & 3;
1028 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1029 if (n1 == 2)
1030 break;
1031 tb1 = tb1->jmp_next[n1];
1032 }
 1033 /* we are now sure that tb jumps to tb1 */
1034 tb_next = tb1;
1035
1036 /* remove tb from the jmp_first list */
1037 ptb = &tb_next->jmp_first;
1038 for(;;) {
1039 tb1 = *ptb;
1040 n1 = (long)tb1 & 3;
1041 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1042 if (n1 == n && tb1 == tb)
1043 break;
1044 ptb = &tb1->jmp_next[n1];
1045 }
1046 *ptb = tb->jmp_next[n];
1047 tb->jmp_next[n] = NULL;
3b46e624 1048
ea041c0e
FB
1049 /* suppress the jump to next tb in generated code */
1050 tb_reset_jump(tb, n);
1051
0124311e 1052 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1053 tb_reset_jump_recursive(tb_next);
1054 }
1055}
1056
1057static void tb_reset_jump_recursive(TranslationBlock *tb)
1058{
1059 tb_reset_jump_recursive2(tb, 0);
1060 tb_reset_jump_recursive2(tb, 1);
1061}
1062
1fddef4b 1063#if defined(TARGET_HAS_ICE)
d720b93d
FB
1064static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1065{
9b3c35e0
JM
1066 target_phys_addr_t addr;
1067 target_ulong pd;
c2f07f81
PB
1068 ram_addr_t ram_addr;
1069 PhysPageDesc *p;
d720b93d 1070
c2f07f81
PB
1071 addr = cpu_get_phys_page_debug(env, pc);
1072 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1073 if (!p) {
1074 pd = IO_MEM_UNASSIGNED;
1075 } else {
1076 pd = p->phys_offset;
1077 }
1078 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1079 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1080}
c27004ec 1081#endif
d720b93d 1082
6658ffb8
PB
1083/* Add a watchpoint. */
1084int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1085{
1086 int i;
1087
1088 for (i = 0; i < env->nb_watchpoints; i++) {
1089 if (addr == env->watchpoint[i].vaddr)
1090 return 0;
1091 }
1092 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1093 return -1;
1094
1095 i = env->nb_watchpoints++;
1096 env->watchpoint[i].vaddr = addr;
1097 tlb_flush_page(env, addr);
1098 /* FIXME: This flush is needed because of the hack to make memory ops
1099 terminate the TB. It can be removed once the proper IO trap and
1100 re-execute bits are in. */
1101 tb_flush(env);
1102 return i;
1103}
1104
1105/* Remove a watchpoint. */
1106int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1107{
1108 int i;
1109
1110 for (i = 0; i < env->nb_watchpoints; i++) {
1111 if (addr == env->watchpoint[i].vaddr) {
1112 env->nb_watchpoints--;
1113 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1114 tlb_flush_page(env, addr);
1115 return 0;
1116 }
1117 }
1118 return -1;
1119}
1120
c33a346e
FB
1121/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1122 breakpoint is reached */
2e12669a 1123int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1124{
1fddef4b 1125#if defined(TARGET_HAS_ICE)
4c3a88a2 1126 int i;
3b46e624 1127
4c3a88a2
FB
1128 for(i = 0; i < env->nb_breakpoints; i++) {
1129 if (env->breakpoints[i] == pc)
1130 return 0;
1131 }
1132
1133 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1134 return -1;
1135 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1136
d720b93d 1137 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1138 return 0;
1139#else
1140 return -1;
1141#endif
1142}
1143
1144/* remove a breakpoint */
2e12669a 1145int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1146{
1fddef4b 1147#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1148 int i;
1149 for(i = 0; i < env->nb_breakpoints; i++) {
1150 if (env->breakpoints[i] == pc)
1151 goto found;
1152 }
1153 return -1;
1154 found:
4c3a88a2 1155 env->nb_breakpoints--;
1fddef4b
FB
1156 if (i < env->nb_breakpoints)
1157 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1158
1159 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1160 return 0;
1161#else
1162 return -1;
1163#endif
1164}
1165
c33a346e
FB
1166/* enable or disable single step mode. EXCP_DEBUG is returned by the
1167 CPU loop after each instruction */
1168void cpu_single_step(CPUState *env, int enabled)
1169{
1fddef4b 1170#if defined(TARGET_HAS_ICE)
c33a346e
FB
1171 if (env->singlestep_enabled != enabled) {
1172 env->singlestep_enabled = enabled;
 1173 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1174 /* XXX: only flush what is necessary */
0124311e 1175 tb_flush(env);
c33a346e
FB
1176 }
1177#endif
1178}
1179
34865134
FB
1180/* enable or disable low levels log */
1181void cpu_set_log(int log_flags)
1182{
1183 loglevel = log_flags;
1184 if (loglevel && !logfile) {
11fcfab4 1185 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1186 if (!logfile) {
1187 perror(logfilename);
1188 _exit(1);
1189 }
9fa3e853
FB
1190#if !defined(CONFIG_SOFTMMU)
1191 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1192 {
1193 static uint8_t logfile_buf[4096];
1194 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1195 }
1196#else
34865134 1197 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1198#endif
e735b91c
PB
1199 log_append = 1;
1200 }
1201 if (!loglevel && logfile) {
1202 fclose(logfile);
1203 logfile = NULL;
34865134
FB
1204 }
1205}
1206
1207void cpu_set_log_filename(const char *filename)
1208{
1209 logfilename = strdup(filename);
e735b91c
PB
1210 if (logfile) {
1211 fclose(logfile);
1212 logfile = NULL;
1213 }
1214 cpu_set_log(loglevel);
34865134 1215}
c33a346e 1216
0124311e 1217/* mask must never be zero, except for A20 change call */
68a79315 1218void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1219{
1220 TranslationBlock *tb;
15a51156 1221 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1222
68a79315 1223 env->interrupt_request |= mask;
ea041c0e
FB
1224 /* if the cpu is currently executing code, we must unlink it and
1225 all the potentially executing TB */
1226 tb = env->current_tb;
ee8b7021
FB
1227 if (tb && !testandset(&interrupt_lock)) {
1228 env->current_tb = NULL;
ea041c0e 1229 tb_reset_jump_recursive(tb);
15a51156 1230 resetlock(&interrupt_lock);
ea041c0e
FB
1231 }
1232}
1233
b54ad049
FB
1234void cpu_reset_interrupt(CPUState *env, int mask)
1235{
1236 env->interrupt_request &= ~mask;
1237}
1238
f193c797 1239CPULogItem cpu_log_items[] = {
5fafdf24 1240 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1241 "show generated host assembly code for each compiled TB" },
1242 { CPU_LOG_TB_IN_ASM, "in_asm",
1243 "show target assembly code for each compiled TB" },
5fafdf24 1244 { CPU_LOG_TB_OP, "op",
57fec1fe 1245 "show micro ops for each compiled TB" },
f193c797 1246 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1247 "show micro ops "
1248#ifdef TARGET_I386
1249 "before eflags optimization and "
f193c797 1250#endif
e01a1157 1251 "after liveness analysis" },
f193c797
FB
1252 { CPU_LOG_INT, "int",
1253 "show interrupts/exceptions in short format" },
1254 { CPU_LOG_EXEC, "exec",
1255 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1256 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1257 "show CPU state before block translation" },
f193c797
FB
1258#ifdef TARGET_I386
1259 { CPU_LOG_PCALL, "pcall",
1260 "show protected mode far calls/returns/exceptions" },
1261#endif
8e3a9fd2 1262#ifdef DEBUG_IOPORT
fd872598
FB
1263 { CPU_LOG_IOPORT, "ioport",
1264 "show all i/o ports accesses" },
8e3a9fd2 1265#endif
f193c797
FB
1266 { 0, NULL, NULL },
1267};
1268
1269static int cmp1(const char *s1, int n, const char *s2)
1270{
1271 if (strlen(s2) != n)
1272 return 0;
1273 return memcmp(s1, s2, n) == 0;
1274}
3b46e624 1275
f193c797
FB
1276/* takes a comma separated list of log masks. Return 0 if error. */
1277int cpu_str_to_log_mask(const char *str)
1278{
1279 CPULogItem *item;
1280 int mask;
1281 const char *p, *p1;
1282
1283 p = str;
1284 mask = 0;
1285 for(;;) {
1286 p1 = strchr(p, ',');
1287 if (!p1)
1288 p1 = p + strlen(p);
8e3a9fd2
FB
1289 if(cmp1(p,p1-p,"all")) {
1290 for(item = cpu_log_items; item->mask != 0; item++) {
1291 mask |= item->mask;
1292 }
1293 } else {
f193c797
FB
1294 for(item = cpu_log_items; item->mask != 0; item++) {
1295 if (cmp1(p, p1 - p, item->name))
1296 goto found;
1297 }
1298 return 0;
8e3a9fd2 1299 }
f193c797
FB
1300 found:
1301 mask |= item->mask;
1302 if (*p1 != ',')
1303 break;
1304 p = p1 + 1;
1305 }
1306 return mask;
1307}
ea041c0e 1308
7501267e
FB
1309void cpu_abort(CPUState *env, const char *fmt, ...)
1310{
1311 va_list ap;
493ae1f0 1312 va_list ap2;
7501267e
FB
1313
1314 va_start(ap, fmt);
493ae1f0 1315 va_copy(ap2, ap);
7501267e
FB
1316 fprintf(stderr, "qemu: fatal: ");
1317 vfprintf(stderr, fmt, ap);
1318 fprintf(stderr, "\n");
1319#ifdef TARGET_I386
0573fbfc
TS
1320 if(env->intercept & INTERCEPT_SVM_MASK) {
1321 /* most probably the virtual machine should not
1322 be shut down but rather caught by the VMM */
1323 vmexit(SVM_EXIT_SHUTDOWN, 0);
1324 }
7fe48483
FB
1325 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1326#else
1327 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1328#endif
924edcae 1329 if (logfile) {
f9373291 1330 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1331 vfprintf(logfile, fmt, ap2);
f9373291
JM
1332 fprintf(logfile, "\n");
1333#ifdef TARGET_I386
1334 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1335#else
1336 cpu_dump_state(env, logfile, fprintf, 0);
1337#endif
924edcae
AZ
1338 fflush(logfile);
1339 fclose(logfile);
1340 }
493ae1f0 1341 va_end(ap2);
f9373291 1342 va_end(ap);
7501267e
FB
1343 abort();
1344}
1345
c5be9f08
TS
1346CPUState *cpu_copy(CPUState *env)
1347{
01ba9816 1348 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1349 /* preserve chaining and index */
1350 CPUState *next_cpu = new_env->next_cpu;
1351 int cpu_index = new_env->cpu_index;
1352 memcpy(new_env, env, sizeof(CPUState));
1353 new_env->next_cpu = next_cpu;
1354 new_env->cpu_index = cpu_index;
1355 return new_env;
1356}
1357
0124311e
FB
1358#if !defined(CONFIG_USER_ONLY)
1359
ee8b7021
FB
1360/* NOTE: if flush_global is true, also flush global entries (not
1361 implemented yet) */
1362void tlb_flush(CPUState *env, int flush_global)
33417e70 1363{
33417e70 1364 int i;
0124311e 1365
9fa3e853
FB
1366#if defined(DEBUG_TLB)
1367 printf("tlb_flush:\n");
1368#endif
0124311e
FB
1369 /* must reset current TB so that interrupts cannot modify the
1370 links while we are modifying them */
1371 env->current_tb = NULL;
1372
33417e70 1373 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1374 env->tlb_table[0][i].addr_read = -1;
1375 env->tlb_table[0][i].addr_write = -1;
1376 env->tlb_table[0][i].addr_code = -1;
1377 env->tlb_table[1][i].addr_read = -1;
1378 env->tlb_table[1][i].addr_write = -1;
1379 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1380#if (NB_MMU_MODES >= 3)
1381 env->tlb_table[2][i].addr_read = -1;
1382 env->tlb_table[2][i].addr_write = -1;
1383 env->tlb_table[2][i].addr_code = -1;
1384#if (NB_MMU_MODES == 4)
1385 env->tlb_table[3][i].addr_read = -1;
1386 env->tlb_table[3][i].addr_write = -1;
1387 env->tlb_table[3][i].addr_code = -1;
1388#endif
1389#endif
33417e70 1390 }
9fa3e853 1391
8a40a180 1392 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1393
1394#if !defined(CONFIG_SOFTMMU)
1395 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1396#endif
1397#ifdef USE_KQEMU
1398 if (env->kqemu_enabled) {
1399 kqemu_flush(env, flush_global);
1400 }
9fa3e853 1401#endif
e3db7226 1402 tlb_flush_count++;
33417e70
FB
1403}
1404
274da6b2 1405static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1406{
5fafdf24 1407 if (addr == (tlb_entry->addr_read &
84b7b8e7 1408 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1409 addr == (tlb_entry->addr_write &
84b7b8e7 1410 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1411 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1412 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1413 tlb_entry->addr_read = -1;
1414 tlb_entry->addr_write = -1;
1415 tlb_entry->addr_code = -1;
1416 }
61382a50
FB
1417}
1418
2e12669a 1419void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1420{
8a40a180 1421 int i;
9fa3e853 1422 TranslationBlock *tb;
0124311e 1423
9fa3e853 1424#if defined(DEBUG_TLB)
108c49b8 1425 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1426#endif
0124311e
FB
1427 /* must reset current TB so that interrupts cannot modify the
1428 links while we are modifying them */
1429 env->current_tb = NULL;
61382a50
FB
1430
1431 addr &= TARGET_PAGE_MASK;
1432 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1433 tlb_flush_entry(&env->tlb_table[0][i], addr);
1434 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1435#if (NB_MMU_MODES >= 3)
1436 tlb_flush_entry(&env->tlb_table[2][i], addr);
1437#if (NB_MMU_MODES == 4)
1438 tlb_flush_entry(&env->tlb_table[3][i], addr);
1439#endif
1440#endif
0124311e 1441
b362e5e0
PB
1442 /* Discard jump cache entries for any tb which might potentially
1443 overlap the flushed page. */
1444 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1445 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1446
1447 i = tb_jmp_cache_hash_page(addr);
1448 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1449
0124311e 1450#if !defined(CONFIG_SOFTMMU)
9fa3e853 1451 if (addr < MMAP_AREA_END)
0124311e 1452 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1453#endif
0a962c02
FB
1454#ifdef USE_KQEMU
1455 if (env->kqemu_enabled) {
1456 kqemu_flush_page(env, addr);
1457 }
1458#endif
9fa3e853
FB
1459}
1460
9fa3e853
FB
1461/* update the TLBs so that writes to code in the virtual page 'addr'
1462 can be detected */
6a00d601 1463static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1464{
5fafdf24 1465 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1466 ram_addr + TARGET_PAGE_SIZE,
1467 CODE_DIRTY_FLAG);
9fa3e853
FB
1468}
1469
9fa3e853 1470/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1471 tested for self modifying code */
5fafdf24 1472static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1473 target_ulong vaddr)
9fa3e853 1474{
3a7d929e 1475 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1476}
1477
5fafdf24 1478static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1479 unsigned long start, unsigned long length)
1480{
1481 unsigned long addr;
84b7b8e7
FB
1482 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1483 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1484 if ((addr - start) < length) {
84b7b8e7 1485 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1486 }
1487 }
1488}
1489
3a7d929e 1490void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1491 int dirty_flags)
1ccde1cb
FB
1492{
1493 CPUState *env;
4f2ac237 1494 unsigned long length, start1;
0a962c02
FB
1495 int i, mask, len;
1496 uint8_t *p;
1ccde1cb
FB
1497
1498 start &= TARGET_PAGE_MASK;
1499 end = TARGET_PAGE_ALIGN(end);
1500
1501 length = end - start;
1502 if (length == 0)
1503 return;
0a962c02 1504 len = length >> TARGET_PAGE_BITS;
3a7d929e 1505#ifdef USE_KQEMU
6a00d601
FB
1506 /* XXX: should not depend on cpu context */
1507 env = first_cpu;
3a7d929e 1508 if (env->kqemu_enabled) {
f23db169
FB
1509 ram_addr_t addr;
1510 addr = start;
1511 for(i = 0; i < len; i++) {
1512 kqemu_set_notdirty(env, addr);
1513 addr += TARGET_PAGE_SIZE;
1514 }
3a7d929e
FB
1515 }
1516#endif
f23db169
FB
1517 mask = ~dirty_flags;
1518 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1519 for(i = 0; i < len; i++)
1520 p[i] &= mask;
1521
1ccde1cb
FB
1522 /* we modify the TLB cache so that the dirty bit will be set again
1523 when accessing the range */
59817ccb 1524 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1525 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1526 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1527 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1528 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1529 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1530#if (NB_MMU_MODES >= 3)
1531 for(i = 0; i < CPU_TLB_SIZE; i++)
1532 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1533#if (NB_MMU_MODES == 4)
1534 for(i = 0; i < CPU_TLB_SIZE; i++)
1535 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1536#endif
1537#endif
6a00d601 1538 }
59817ccb
FB
1539
1540#if !defined(CONFIG_SOFTMMU)
1541 /* XXX: this is expensive */
1542 {
1543 VirtPageDesc *p;
1544 int j;
1545 target_ulong addr;
1546
1547 for(i = 0; i < L1_SIZE; i++) {
1548 p = l1_virt_map[i];
1549 if (p) {
1550 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1551 for(j = 0; j < L2_SIZE; j++) {
1552 if (p->valid_tag == virt_valid_tag &&
1553 p->phys_addr >= start && p->phys_addr < end &&
1554 (p->prot & PROT_WRITE)) {
1555 if (addr < MMAP_AREA_END) {
5fafdf24 1556 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1557 p->prot & ~PROT_WRITE);
1558 }
1559 }
1560 addr += TARGET_PAGE_SIZE;
1561 p++;
1562 }
1563 }
1564 }
1565 }
1566#endif
1ccde1cb
FB
1567}
1568
3a7d929e
FB
1569static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1570{
1571 ram_addr_t ram_addr;
1572
84b7b8e7 1573 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1574 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1575 tlb_entry->addend - (unsigned long)phys_ram_base;
1576 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1577 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1578 }
1579 }
1580}
1581
1582/* update the TLB according to the current state of the dirty bits */
1583void cpu_tlb_update_dirty(CPUState *env)
1584{
1585 int i;
1586 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1587 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1588 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1589 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1590#if (NB_MMU_MODES >= 3)
1591 for(i = 0; i < CPU_TLB_SIZE; i++)
1592 tlb_update_dirty(&env->tlb_table[2][i]);
1593#if (NB_MMU_MODES == 4)
1594 for(i = 0; i < CPU_TLB_SIZE; i++)
1595 tlb_update_dirty(&env->tlb_table[3][i]);
1596#endif
1597#endif
3a7d929e
FB
1598}
1599
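/* Dirty-bit handling: while a RAM page is clean, its TLB write entry is
   tagged IO_MEM_NOTDIRTY (see tlb_set_page_exec and tlb_reset_dirty_range),
   so the first store goes through the slow path, which marks the page dirty;
   tlb_set_dirty1() then switches the entry back to IO_MEM_RAM so that later
   stores take the fast path. */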
5fafdf24 1600static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1601 unsigned long start)
1ccde1cb
FB
1602{
1603 unsigned long addr;
84b7b8e7
FB
1604 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1605 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1606 if (addr == start) {
84b7b8e7 1607 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1608 }
1609 }
1610}
1611
1612/* update the TLB corresponding to virtual page vaddr and phys addr
1613 addr so that it is no longer dirty */
6a00d601
FB
1614static inline void tlb_set_dirty(CPUState *env,
1615 unsigned long addr, target_ulong vaddr)
1ccde1cb 1616{
1ccde1cb
FB
1617 int i;
1618
1ccde1cb
FB
1619 addr &= TARGET_PAGE_MASK;
1620 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1621 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1622 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1623#if (NB_MMU_MODES >= 3)
1624 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1625#if (NB_MMU_MODES == 4)
1626 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1627#endif
1628#endif
9fa3e853
FB
1629}
1630
59817ccb
FB
1631/* add a new TLB entry. At most one entry for a given virtual address
1632 is permitted. Return 0 if OK or 2 if the page could not be mapped
1633 (can only happen in non SOFTMMU mode for I/O pages or pages
1634 conflicting with the host address space). */
5fafdf24
TS
1635int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1636 target_phys_addr_t paddr, int prot,
6ebbf390 1637 int mmu_idx, int is_softmmu)
9fa3e853 1638{
92e873b9 1639 PhysPageDesc *p;
4f2ac237 1640 unsigned long pd;
9fa3e853 1641 unsigned int index;
4f2ac237 1642 target_ulong address;
108c49b8 1643 target_phys_addr_t addend;
9fa3e853 1644 int ret;
84b7b8e7 1645 CPUTLBEntry *te;
6658ffb8 1646 int i;
9fa3e853 1647
92e873b9 1648 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1649 if (!p) {
1650 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1651 } else {
1652 pd = p->phys_offset;
9fa3e853
FB
1653 }
1654#if defined(DEBUG_TLB)
6ebbf390
JM
1655 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1656 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1657#endif
1658
1659 ret = 0;
1660#if !defined(CONFIG_SOFTMMU)
5fafdf24 1661 if (is_softmmu)
9fa3e853
FB
1662#endif
1663 {
2a4188a3 1664 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1665 /* IO memory case */
1666 address = vaddr | pd;
1667 addend = paddr;
1668 } else {
1669 /* standard memory */
1670 address = vaddr;
1671 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1672 }
6658ffb8
PB
1673
1674 /* Make accesses to pages with watchpoints go via the
1675 watchpoint trap routines. */
1676 for (i = 0; i < env->nb_watchpoints; i++) {
1677 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1678 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1679 env->watchpoint[i].addend = 0;
6658ffb8
PB
1680 address = vaddr | io_mem_watch;
1681 } else {
d79acba4
AZ
1682 env->watchpoint[i].addend = pd - paddr +
1683 (unsigned long) phys_ram_base;
6658ffb8
PB
1684 /* TODO: Figure out how to make read watchpoints coexist
1685 with code. */
1686 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1687 }
1688 }
1689 }
d79acba4 1690
90f18422 1691 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1692 addend -= vaddr;
6ebbf390 1693 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1694 te->addend = addend;
67b915a5 1695 if (prot & PAGE_READ) {
84b7b8e7
FB
1696 te->addr_read = address;
1697 } else {
1698 te->addr_read = -1;
1699 }
1700 if (prot & PAGE_EXEC) {
1701 te->addr_code = address;
9fa3e853 1702 } else {
84b7b8e7 1703 te->addr_code = -1;
9fa3e853 1704 }
67b915a5 1705 if (prot & PAGE_WRITE) {
5fafdf24 1706 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1707 (pd & IO_MEM_ROMD)) {
1708 /* write access calls the I/O callback */
5fafdf24 1709 te->addr_write = vaddr |
856074ec 1710 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1711 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1712 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1713 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1714 } else {
84b7b8e7 1715 te->addr_write = address;
9fa3e853
FB
1716 }
1717 } else {
84b7b8e7 1718 te->addr_write = -1;
9fa3e853
FB
1719 }
1720 }
1721#if !defined(CONFIG_SOFTMMU)
1722 else {
1723 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1724 /* IO access: no mapping is done as it will be handled by the
1725 soft MMU */
1726 if (!(env->hflags & HF_SOFTMMU_MASK))
1727 ret = 2;
1728 } else {
1729 void *map_addr;
59817ccb
FB
1730
1731 if (vaddr >= MMAP_AREA_END) {
1732 ret = 2;
1733 } else {
1734 if (prot & PROT_WRITE) {
5fafdf24 1735 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1736#if defined(TARGET_HAS_SMC) || 1
59817ccb 1737 first_tb ||
d720b93d 1738#endif
5fafdf24 1739 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1740 !cpu_physical_memory_is_dirty(pd))) {
1741 /* ROM: we do as if code was inside */
1742 /* if code is present, we only map as read only and save the
1743 original mapping */
1744 VirtPageDesc *vp;
3b46e624 1745
90f18422 1746 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1747 vp->phys_addr = pd;
1748 vp->prot = prot;
1749 vp->valid_tag = virt_valid_tag;
1750 prot &= ~PAGE_WRITE;
1751 }
1752 }
5fafdf24 1753 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1754 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1755 if (map_addr == MAP_FAILED) {
1756 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1757 paddr, vaddr);
9fa3e853 1758 }
9fa3e853
FB
1759 }
1760 }
1761 }
1762#endif
1763 return ret;
1764}
1765
1766/* called from signal handler: invalidate the code and unprotect the
1767 page. Return TRUE if the fault was successfully handled. */
53a5960a 1768int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1769{
1770#if !defined(CONFIG_SOFTMMU)
1771 VirtPageDesc *vp;
1772
1773#if defined(DEBUG_TLB)
1774 printf("page_unprotect: addr=0x%08x\n", addr);
1775#endif
1776 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1777
1778 /* if it is not mapped, no need to worry here */
1779 if (addr >= MMAP_AREA_END)
1780 return 0;
9fa3e853
FB
1781 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1782 if (!vp)
1783 return 0;
1784 /* NOTE: in this case, valid_tag is _not_ tested as it
1785 validates only the code TLB */
1786 if (vp->valid_tag != virt_valid_tag)
1787 return 0;
1788 if (!(vp->prot & PAGE_WRITE))
1789 return 0;
1790#if defined(DEBUG_TLB)
5fafdf24 1791 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1792 addr, vp->phys_addr, vp->prot);
1793#endif
59817ccb
FB
1794 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1795 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1796 (unsigned long)addr, vp->prot);
d720b93d 1797 /* set the dirty bit */
0a962c02 1798 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1799 /* flush the code inside */
1800 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1801 return 1;
1802#else
1803 return 0;
1804#endif
33417e70
FB
1805}
1806
0124311e
FB
1807#else
1808
ee8b7021 1809void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1810{
1811}
1812
2e12669a 1813void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1814{
1815}
1816
5fafdf24
TS
1817int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1818 target_phys_addr_t paddr, int prot,
6ebbf390 1819 int mmu_idx, int is_softmmu)
9fa3e853
FB
1820{
1821 return 0;
1822}
0124311e 1823
9fa3e853
FB
1824/* dump memory mappings */
1825void page_dump(FILE *f)
33417e70 1826{
9fa3e853
FB
1827 unsigned long start, end;
1828 int i, j, prot, prot1;
1829 PageDesc *p;
33417e70 1830
9fa3e853
FB
1831 fprintf(f, "%-8s %-8s %-8s %s\n",
1832 "start", "end", "size", "prot");
1833 start = -1;
1834 end = -1;
1835 prot = 0;
1836 for(i = 0; i <= L1_SIZE; i++) {
1837 if (i < L1_SIZE)
1838 p = l1_map[i];
1839 else
1840 p = NULL;
1841 for(j = 0;j < L2_SIZE; j++) {
1842 if (!p)
1843 prot1 = 0;
1844 else
1845 prot1 = p[j].flags;
1846 if (prot1 != prot) {
1847 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1848 if (start != -1) {
1849 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1850 start, end, end - start,
9fa3e853
FB
1851 prot & PAGE_READ ? 'r' : '-',
1852 prot & PAGE_WRITE ? 'w' : '-',
1853 prot & PAGE_EXEC ? 'x' : '-');
1854 }
1855 if (prot1 != 0)
1856 start = end;
1857 else
1858 start = -1;
1859 prot = prot1;
1860 }
1861 if (!p)
1862 break;
1863 }
33417e70 1864 }
33417e70
FB
1865}
1866
53a5960a 1867int page_get_flags(target_ulong address)
33417e70 1868{
9fa3e853
FB
1869 PageDesc *p;
1870
1871 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1872 if (!p)
9fa3e853
FB
1873 return 0;
1874 return p->flags;
1875}
1876
1877/* modify the flags of a page and invalidate the code if
1878 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1879 depending on PAGE_WRITE */
53a5960a 1880void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1881{
1882 PageDesc *p;
53a5960a 1883 target_ulong addr;
9fa3e853
FB
1884
1885 start = start & TARGET_PAGE_MASK;
1886 end = TARGET_PAGE_ALIGN(end);
1887 if (flags & PAGE_WRITE)
1888 flags |= PAGE_WRITE_ORG;
1889 spin_lock(&tb_lock);
1890 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1891 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1892 /* if the write protection is set, then we invalidate the code
1893 inside */
5fafdf24 1894 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1895 (flags & PAGE_WRITE) &&
1896 p->first_tb) {
d720b93d 1897 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1898 }
1899 p->flags = flags;
1900 }
1901 spin_unlock(&tb_lock);
33417e70
FB
1902}
1903
3d97b40b
TS
1904int page_check_range(target_ulong start, target_ulong len, int flags)
1905{
1906 PageDesc *p;
1907 target_ulong end;
1908 target_ulong addr;
1909
1910 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1911 start = start & TARGET_PAGE_MASK;
1912
1913 if( end < start )
1914 /* we've wrapped around */
1915 return -1;
1916 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1917 p = page_find(addr >> TARGET_PAGE_BITS);
1918 if( !p )
1919 return -1;
1920 if( !(p->flags & PAGE_VALID) )
1921 return -1;
1922
dae3270c 1923 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1924 return -1;
dae3270c
FB
1925 if (flags & PAGE_WRITE) {
1926 if (!(p->flags & PAGE_WRITE_ORG))
1927 return -1;
1928 /* unprotect the page if it was put read-only because it
1929 contains translated code */
1930 if (!(p->flags & PAGE_WRITE)) {
1931 if (!page_unprotect(addr, 0, NULL))
1932 return -1;
1933 }
1934 return 0;
1935 }
3d97b40b
TS
1936 }
1937 return 0;
1938}
1939
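/* Illustrative sketch (not part of the original file): how user-mode
   emulation code, e.g. a target mmap() handler, might use the page flag
   helpers above. The guest address and length are made up. */
#if 0
static void example_track_guest_mapping(target_ulong guest_addr,
                                        target_ulong size)
{
    int flags;

    /* record the new mapping as valid, readable and writable */
    page_set_flags(guest_addr, guest_addr + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* before emulating a 4 byte guest store, verify the access is legal */
    if (page_check_range(guest_addr, 4, PAGE_WRITE) < 0) {
        /* a real caller would raise a target SIGSEGV here */
    }

    /* per-page flags can also be queried directly */
    flags = page_get_flags(guest_addr);
    (void)flags;
}
#endif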
9fa3e853
FB
1940/* called from signal handler: invalidate the code and unprotect the
1941 page. Return TRUE if the fault was successfully handled. */
53a5960a 1942int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1943{
1944 unsigned int page_index, prot, pindex;
1945 PageDesc *p, *p1;
53a5960a 1946 target_ulong host_start, host_end, addr;
9fa3e853 1947
83fb7adf 1948 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1949 page_index = host_start >> TARGET_PAGE_BITS;
1950 p1 = page_find(page_index);
1951 if (!p1)
1952 return 0;
83fb7adf 1953 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1954 p = p1;
1955 prot = 0;
1956 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1957 prot |= p->flags;
1958 p++;
1959 }
1960 /* if the page was really writable, then we change its
1961 protection back to writable */
1962 if (prot & PAGE_WRITE_ORG) {
1963 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1964 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1965 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1966 (prot & PAGE_BITS) | PAGE_WRITE);
1967 p1[pindex].flags |= PAGE_WRITE;
1968 /* and since the content will be modified, we must invalidate
1969 the corresponding translated code. */
d720b93d 1970 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1971#ifdef DEBUG_TB_CHECK
1972 tb_invalidate_check(address);
1973#endif
1974 return 1;
1975 }
1976 }
1977 return 0;
1978}
1979
6a00d601
FB
1980static inline void tlb_set_dirty(CPUState *env,
1981 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1982{
1983}
9fa3e853
FB
1984#endif /* defined(CONFIG_USER_ONLY) */
1985
db7b5426
BS
1986static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1987 int memory);
1988static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1989 int orig_memory);
1990#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1991 need_subpage) \
1992 do { \
1993 if (addr > start_addr) \
1994 start_addr2 = 0; \
1995 else { \
1996 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1997 if (start_addr2 > 0) \
1998 need_subpage = 1; \
1999 } \
2000 \
49e9fba2 2001 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2002 end_addr2 = TARGET_PAGE_SIZE - 1; \
2003 else { \
2004 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2005 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2006 need_subpage = 1; \
2007 } \
2008 } while (0)
2009
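/* Worked example (illustrative, assuming TARGET_PAGE_SIZE == 0x1000):
   registering a region with start_addr = 0x1000100 and orig_size = 0x200
   visits a single page, with addr == start_addr on the first iteration.
   Since addr is not greater than start_addr, start_addr2 becomes 0x100 and
   need_subpage is set; since the region ends inside the page,
   end_addr2 = (0x1000100 + 0x200 - 1) & 0xfff = 0x2ff, which is below
   TARGET_PAGE_SIZE - 1, so need_subpage is set again. Only the byte range
   0x100..0x2ff of that page is then handed to subpage_register(). */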
33417e70
FB
2010/* register physical memory. 'size' must be a multiple of the target
2011 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2012 io memory page */
5fafdf24 2013void cpu_register_physical_memory(target_phys_addr_t start_addr,
2e12669a
FB
2014 unsigned long size,
2015 unsigned long phys_offset)
33417e70 2016{
108c49b8 2017 target_phys_addr_t addr, end_addr;
92e873b9 2018 PhysPageDesc *p;
9d42037b 2019 CPUState *env;
db7b5426
BS
2020 unsigned long orig_size = size;
2021 void *subpage;
33417e70 2022
5fd386f6 2023 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2024 end_addr = start_addr + (target_phys_addr_t)size;
2025 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2027 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2028 unsigned long orig_memory = p->phys_offset;
2029 target_phys_addr_t start_addr2, end_addr2;
2030 int need_subpage = 0;
2031
2032 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2033 need_subpage);
4254fab8 2034 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2035 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2036 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2037 &p->phys_offset, orig_memory);
2038 } else {
2039 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2040 >> IO_MEM_SHIFT];
2041 }
2042 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2043 } else {
2044 p->phys_offset = phys_offset;
2045 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2046 (phys_offset & IO_MEM_ROMD))
2047 phys_offset += TARGET_PAGE_SIZE;
2048 }
2049 } else {
2050 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2051 p->phys_offset = phys_offset;
2052 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2053 (phys_offset & IO_MEM_ROMD))
2054 phys_offset += TARGET_PAGE_SIZE;
2055 else {
2056 target_phys_addr_t start_addr2, end_addr2;
2057 int need_subpage = 0;
2058
2059 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2060 end_addr2, need_subpage);
2061
4254fab8 2062 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2063 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2064 &p->phys_offset, IO_MEM_UNASSIGNED);
2065 subpage_register(subpage, start_addr2, end_addr2,
2066 phys_offset);
2067 }
2068 }
2069 }
33417e70 2070 }
3b46e624 2071
9d42037b
FB
2072 /* since each CPU stores ram addresses in its TLB cache, we must
2073 reset the modified entries */
2074 /* XXX: slow ! */
2075 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2076 tlb_flush(env, 1);
2077 }
33417e70
FB
2078}
2079
ba863458
FB
2080/* XXX: temporary until new memory mapping API */
2081uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2082{
2083 PhysPageDesc *p;
2084
2085 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2086 if (!p)
2087 return IO_MEM_UNASSIGNED;
2088 return p->phys_offset;
2089}
2090
e9a1ab19
FB
2091/* XXX: better than nothing */
2092ram_addr_t qemu_ram_alloc(unsigned int size)
2093{
2094 ram_addr_t addr;
2095 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
5fafdf24 2096 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
e9a1ab19
FB
2097 size, phys_ram_size);
2098 abort();
2099 }
2100 addr = phys_ram_alloc_offset;
2101 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2102 return addr;
2103}
2104
2105void qemu_ram_free(ram_addr_t addr)
2106{
2107}
2108
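/* Illustrative sketch (not part of the original file): how a machine model
   might allocate guest RAM with qemu_ram_alloc() and map it for the guest
   with cpu_register_physical_memory(). The size and guest physical base
   address are made up. */
#if 0
static void example_init_board_ram(void)
{
    ram_addr_t ram_offset;

    /* carve 8 MB out of the preallocated phys_ram_base area */
    ram_offset = qemu_ram_alloc(8 * 1024 * 1024);

    /* make it visible as ordinary RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif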
a4193c8a 2109static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2110{
67d3b957 2111#ifdef DEBUG_UNASSIGNED
ab3d1727 2112 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2113#endif
2114#ifdef TARGET_SPARC
6c36d3fa 2115 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2116#elif TARGET_CRIS
2117 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2118#endif
33417e70
FB
2119 return 0;
2120}
2121
a4193c8a 2122static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2123{
67d3b957 2124#ifdef DEBUG_UNASSIGNED
ab3d1727 2125 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2126#endif
b4f0a316 2127#ifdef TARGET_SPARC
6c36d3fa 2128 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2129#elif TARGET_CRIS
2130 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2131#endif
33417e70
FB
2132}
2133
2134static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2135 unassigned_mem_readb,
2136 unassigned_mem_readb,
2137 unassigned_mem_readb,
2138};
2139
2140static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2141 unassigned_mem_writeb,
2142 unassigned_mem_writeb,
2143 unassigned_mem_writeb,
2144};
2145
3a7d929e 2146static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2147{
3a7d929e
FB
2148 unsigned long ram_addr;
2149 int dirty_flags;
2150 ram_addr = addr - (unsigned long)phys_ram_base;
2151 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2152 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2153#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2154 tb_invalidate_phys_page_fast(ram_addr, 1);
2155 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2156#endif
3a7d929e 2157 }
c27004ec 2158 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2159#ifdef USE_KQEMU
2160 if (cpu_single_env->kqemu_enabled &&
2161 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2162 kqemu_modify_page(cpu_single_env, ram_addr);
2163#endif
f23db169
FB
2164 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2165 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2166 /* we remove the notdirty callback only if the code has been
2167 flushed */
2168 if (dirty_flags == 0xff)
6a00d601 2169 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2170}
2171
3a7d929e 2172static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2173{
3a7d929e
FB
2174 unsigned long ram_addr;
2175 int dirty_flags;
2176 ram_addr = addr - (unsigned long)phys_ram_base;
2177 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2178 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2179#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2180 tb_invalidate_phys_page_fast(ram_addr, 2);
2181 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2182#endif
3a7d929e 2183 }
c27004ec 2184 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2185#ifdef USE_KQEMU
2186 if (cpu_single_env->kqemu_enabled &&
2187 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2188 kqemu_modify_page(cpu_single_env, ram_addr);
2189#endif
f23db169
FB
2190 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2191 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2192 /* we remove the notdirty callback only if the code has been
2193 flushed */
2194 if (dirty_flags == 0xff)
6a00d601 2195 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2196}
2197
3a7d929e 2198static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2199{
3a7d929e
FB
2200 unsigned long ram_addr;
2201 int dirty_flags;
2202 ram_addr = addr - (unsigned long)phys_ram_base;
2203 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2204 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2205#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2206 tb_invalidate_phys_page_fast(ram_addr, 4);
2207 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2208#endif
3a7d929e 2209 }
c27004ec 2210 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2211#ifdef USE_KQEMU
2212 if (cpu_single_env->kqemu_enabled &&
2213 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2214 kqemu_modify_page(cpu_single_env, ram_addr);
2215#endif
f23db169
FB
2216 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2217 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2218 /* we remove the notdirty callback only if the code has been
2219 flushed */
2220 if (dirty_flags == 0xff)
6a00d601 2221 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2222}
2223
3a7d929e 2224static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2225 NULL, /* never used */
2226 NULL, /* never used */
2227 NULL, /* never used */
2228};
2229
1ccde1cb
FB
2230static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2231 notdirty_mem_writeb,
2232 notdirty_mem_writew,
2233 notdirty_mem_writel,
2234};
2235
6658ffb8
PB
2236#if defined(CONFIG_SOFTMMU)
2237/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2238 so these check for a hit, then pass through to the normal out-of-line
2239 phys routines. */
2240static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2241{
2242 return ldub_phys(addr);
2243}
2244
2245static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2246{
2247 return lduw_phys(addr);
2248}
2249
2250static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2251{
2252 return ldl_phys(addr);
2253}
2254
2255/* Generate a debug exception if a watchpoint has been hit.
2256 Returns the real physical address of the access. addr will be a host
d79acba4 2257 address in case of a RAM location. */
6658ffb8
PB
2258static target_ulong check_watchpoint(target_phys_addr_t addr)
2259{
2260 CPUState *env = cpu_single_env;
2261 target_ulong watch;
2262 target_ulong retaddr;
2263 int i;
2264
2265 retaddr = addr;
2266 for (i = 0; i < env->nb_watchpoints; i++) {
2267 watch = env->watchpoint[i].vaddr;
2268 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2269 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2270 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2271 cpu_single_env->watchpoint_hit = i + 1;
2272 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2273 break;
2274 }
2275 }
2276 }
2277 return retaddr;
2278}
2279
2280static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2281 uint32_t val)
2282{
2283 addr = check_watchpoint(addr);
2284 stb_phys(addr, val);
2285}
2286
2287static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2288 uint32_t val)
2289{
2290 addr = check_watchpoint(addr);
2291 stw_phys(addr, val);
2292}
2293
2294static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2295 uint32_t val)
2296{
2297 addr = check_watchpoint(addr);
2298 stl_phys(addr, val);
2299}
2300
2301static CPUReadMemoryFunc *watch_mem_read[3] = {
2302 watch_mem_readb,
2303 watch_mem_readw,
2304 watch_mem_readl,
2305};
2306
2307static CPUWriteMemoryFunc *watch_mem_write[3] = {
2308 watch_mem_writeb,
2309 watch_mem_writew,
2310 watch_mem_writel,
2311};
2312#endif
2313
db7b5426
BS
2314static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2315 unsigned int len)
2316{
db7b5426
BS
2317 uint32_t ret;
2318 unsigned int idx;
2319
2320 idx = SUBPAGE_IDX(addr - mmio->base);
2321#if defined(DEBUG_SUBPAGE)
2322 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2323 mmio, len, addr, idx);
2324#endif
3ee89922 2325 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2326
2327 return ret;
2328}
2329
2330static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2331 uint32_t value, unsigned int len)
2332{
db7b5426
BS
2333 unsigned int idx;
2334
2335 idx = SUBPAGE_IDX(addr - mmio->base);
2336#if defined(DEBUG_SUBPAGE)
2337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2338 mmio, len, addr, idx, value);
2339#endif
3ee89922 2340 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2341}
2342
2343static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2344{
2345#if defined(DEBUG_SUBPAGE)
2346 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2347#endif
2348
2349 return subpage_readlen(opaque, addr, 0);
2350}
2351
2352static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2353 uint32_t value)
2354{
2355#if defined(DEBUG_SUBPAGE)
2356 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2357#endif
2358 subpage_writelen(opaque, addr, value, 0);
2359}
2360
2361static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2362{
2363#if defined(DEBUG_SUBPAGE)
2364 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2365#endif
2366
2367 return subpage_readlen(opaque, addr, 1);
2368}
2369
2370static void subpage_writew (void *opaque, target_phys_addr_t addr,
2371 uint32_t value)
2372{
2373#if defined(DEBUG_SUBPAGE)
2374 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2375#endif
2376 subpage_writelen(opaque, addr, value, 1);
2377}
2378
2379static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2380{
2381#if defined(DEBUG_SUBPAGE)
2382 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2383#endif
2384
2385 return subpage_readlen(opaque, addr, 2);
2386}
2387
2388static void subpage_writel (void *opaque,
2389 target_phys_addr_t addr, uint32_t value)
2390{
2391#if defined(DEBUG_SUBPAGE)
2392 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2393#endif
2394 subpage_writelen(opaque, addr, value, 2);
2395}
2396
2397static CPUReadMemoryFunc *subpage_read[] = {
2398 &subpage_readb,
2399 &subpage_readw,
2400 &subpage_readl,
2401};
2402
2403static CPUWriteMemoryFunc *subpage_write[] = {
2404 &subpage_writeb,
2405 &subpage_writew,
2406 &subpage_writel,
2407};
2408
2409static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2410 int memory)
2411{
2412 int idx, eidx;
4254fab8 2413 unsigned int i;
db7b5426
BS
2414
2415 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2416 return -1;
2417 idx = SUBPAGE_IDX(start);
2418 eidx = SUBPAGE_IDX(end);
2419#if defined(DEBUG_SUBPAGE)
2420 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2421 mmio, start, end, idx, eidx, memory);
2422#endif
2423 memory >>= IO_MEM_SHIFT;
2424 for (; idx <= eidx; idx++) {
4254fab8 2425 for (i = 0; i < 4; i++) {
3ee89922
BS
2426 if (io_mem_read[memory][i]) {
2427 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2428 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2429 }
2430 if (io_mem_write[memory][i]) {
2431 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2432 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2433 }
4254fab8 2434 }
db7b5426
BS
2435 }
2436
2437 return 0;
2438}
2439
2440static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2441 int orig_memory)
2442{
2443 subpage_t *mmio;
2444 int subpage_memory;
2445
2446 mmio = qemu_mallocz(sizeof(subpage_t));
2447 if (mmio != NULL) {
2448 mmio->base = base;
2449 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2450#if defined(DEBUG_SUBPAGE)
2451 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2452 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2453#endif
2454 *phys = subpage_memory | IO_MEM_SUBPAGE;
2455 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2456 }
2457
2458 return mmio;
2459}
2460
33417e70
FB
2461static void io_mem_init(void)
2462{
3a7d929e 2463 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2464 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2465 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2466 io_mem_nb = 5;
2467
6658ffb8
PB
2468#if defined(CONFIG_SOFTMMU)
2469 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2470 watch_mem_write, NULL);
2471#endif
1ccde1cb 2472 /* alloc dirty bits array */
0a962c02 2473 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2474 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2475}
2476
2477/* mem_read and mem_write are arrays of functions containing the
2478 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2479 2). Functions can be omitted with a NULL function pointer. The
2480 registered functions may be modified dynamically later.
2481 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2482 modified. If it is zero, a new io zone is allocated. The return
2483 value can be used with cpu_register_physical_memory(). (-1) is
2484 returned on error. */
33417e70
FB
2485int cpu_register_io_memory(int io_index,
2486 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2487 CPUWriteMemoryFunc **mem_write,
2488 void *opaque)
33417e70 2489{
4254fab8 2490 int i, subwidth = 0;
33417e70
FB
2491
2492 if (io_index <= 0) {
b5ff1b31 2493 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2494 return -1;
2495 io_index = io_mem_nb++;
2496 } else {
2497 if (io_index >= IO_MEM_NB_ENTRIES)
2498 return -1;
2499 }
b5ff1b31 2500
33417e70 2501 for(i = 0;i < 3; i++) {
4254fab8
BS
2502 if (!mem_read[i] || !mem_write[i])
2503 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2504 io_mem_read[io_index][i] = mem_read[i];
2505 io_mem_write[io_index][i] = mem_write[i];
2506 }
a4193c8a 2507 io_mem_opaque[io_index] = opaque;
4254fab8 2508 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2509}
61382a50 2510
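/* Illustrative sketch (not part of the original file): registering a small
   memory-mapped device with the function above. The device state type,
   register layout and the guest physical address 0x10000000 are made up. */
#if 0
typedef struct ExampleDevState {
    uint32_t reg;
} ExampleDevState;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

/* only 32 bit accesses are implemented; the NULL entries make
   cpu_register_io_memory() mark the region with IO_MEM_SUBWIDTH */
static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(ExampleDevState *s)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, example_dev_read,
                                       example_dev_write, s);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif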
8926b517
FB
2511CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2512{
2513 return io_mem_write[io_index >> IO_MEM_SHIFT];
2514}
2515
2516CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2517{
2518 return io_mem_read[io_index >> IO_MEM_SHIFT];
2519}
2520
13eb76e0
FB
2521/* physical memory access (slow version, mainly for debug) */
2522#if defined(CONFIG_USER_ONLY)
5fafdf24 2523void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2524 int len, int is_write)
2525{
2526 int l, flags;
2527 target_ulong page;
53a5960a 2528 void * p;
13eb76e0
FB
2529
2530 while (len > 0) {
2531 page = addr & TARGET_PAGE_MASK;
2532 l = (page + TARGET_PAGE_SIZE) - addr;
2533 if (l > len)
2534 l = len;
2535 flags = page_get_flags(page);
2536 if (!(flags & PAGE_VALID))
2537 return;
2538 if (is_write) {
2539 if (!(flags & PAGE_WRITE))
2540 return;
579a97f7
FB
2541 /* XXX: this code should not depend on lock_user */
2542 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2543 /* FIXME - should this return an error rather than just fail? */
2544 return;
53a5960a
PB
2545 memcpy(p, buf, len);
2546 unlock_user(p, addr, len);
13eb76e0
FB
2547 } else {
2548 if (!(flags & PAGE_READ))
2549 return;
579a97f7
FB
2550 /* XXX: this code should not depend on lock_user */
2551 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2552 /* FIXME - should this return an error rather than just fail? */
2553 return;
53a5960a
PB
2554 memcpy(buf, p, len);
2555 unlock_user(p, addr, 0);
13eb76e0
FB
2556 }
2557 len -= l;
2558 buf += l;
2559 addr += l;
2560 }
2561}
8df1cd07 2562
13eb76e0 2563#else
5fafdf24 2564void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2565 int len, int is_write)
2566{
2567 int l, io_index;
2568 uint8_t *ptr;
2569 uint32_t val;
2e12669a
FB
2570 target_phys_addr_t page;
2571 unsigned long pd;
92e873b9 2572 PhysPageDesc *p;
3b46e624 2573
13eb76e0
FB
2574 while (len > 0) {
2575 page = addr & TARGET_PAGE_MASK;
2576 l = (page + TARGET_PAGE_SIZE) - addr;
2577 if (l > len)
2578 l = len;
92e873b9 2579 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2580 if (!p) {
2581 pd = IO_MEM_UNASSIGNED;
2582 } else {
2583 pd = p->phys_offset;
2584 }
3b46e624 2585
13eb76e0 2586 if (is_write) {
3a7d929e 2587 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2588 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2589 /* XXX: could force cpu_single_env to NULL to avoid
2590 potential bugs */
13eb76e0 2591 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2592 /* 32 bit write access */
c27004ec 2593 val = ldl_p(buf);
a4193c8a 2594 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2595 l = 4;
2596 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2597 /* 16 bit write access */
c27004ec 2598 val = lduw_p(buf);
a4193c8a 2599 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2600 l = 2;
2601 } else {
1c213d19 2602 /* 8 bit write access */
c27004ec 2603 val = ldub_p(buf);
a4193c8a 2604 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2605 l = 1;
2606 }
2607 } else {
b448f2f3
FB
2608 unsigned long addr1;
2609 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2610 /* RAM case */
b448f2f3 2611 ptr = phys_ram_base + addr1;
13eb76e0 2612 memcpy(ptr, buf, l);
3a7d929e
FB
2613 if (!cpu_physical_memory_is_dirty(addr1)) {
2614 /* invalidate code */
2615 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2616 /* set dirty bit */
5fafdf24 2617 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2618 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2619 }
13eb76e0
FB
2620 }
2621 } else {
5fafdf24 2622 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2623 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2624 /* I/O case */
2625 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2626 if (l >= 4 && ((addr & 3) == 0)) {
2627 /* 32 bit read access */
a4193c8a 2628 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2629 stl_p(buf, val);
13eb76e0
FB
2630 l = 4;
2631 } else if (l >= 2 && ((addr & 1) == 0)) {
2632 /* 16 bit read access */
a4193c8a 2633 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2634 stw_p(buf, val);
13eb76e0
FB
2635 l = 2;
2636 } else {
1c213d19 2637 /* 8 bit read access */
a4193c8a 2638 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2639 stb_p(buf, val);
13eb76e0
FB
2640 l = 1;
2641 }
2642 } else {
2643 /* RAM case */
5fafdf24 2644 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2645 (addr & ~TARGET_PAGE_MASK);
2646 memcpy(buf, ptr, l);
2647 }
2648 }
2649 len -= l;
2650 buf += l;
2651 addr += l;
2652 }
2653}
8df1cd07 2654
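/* Illustrative sketch (not part of the original file): a device performing
   DMA through cpu_physical_memory_rw(). The chunk size and addresses are
   made up; cpu_physical_memory_read()/write() are the usual convenience
   wrappers around this function. */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[256];

    while (len > 0) {
        int l = len > (int)sizeof(buf) ? (int)sizeof(buf) : len;
        cpu_physical_memory_rw(src, buf, l, 0);   /* read from guest */
        cpu_physical_memory_rw(dst, buf, l, 1);   /* write back to guest */
        src += l;
        dst += l;
        len -= l;
    }
}
#endif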
d0ecd2aa 2655/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2656void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2657 const uint8_t *buf, int len)
2658{
2659 int l;
2660 uint8_t *ptr;
2661 target_phys_addr_t page;
2662 unsigned long pd;
2663 PhysPageDesc *p;
3b46e624 2664
d0ecd2aa
FB
2665 while (len > 0) {
2666 page = addr & TARGET_PAGE_MASK;
2667 l = (page + TARGET_PAGE_SIZE) - addr;
2668 if (l > len)
2669 l = len;
2670 p = phys_page_find(page >> TARGET_PAGE_BITS);
2671 if (!p) {
2672 pd = IO_MEM_UNASSIGNED;
2673 } else {
2674 pd = p->phys_offset;
2675 }
3b46e624 2676
d0ecd2aa 2677 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2678 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2679 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2680 /* do nothing */
2681 } else {
2682 unsigned long addr1;
2683 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2684 /* ROM/RAM case */
2685 ptr = phys_ram_base + addr1;
2686 memcpy(ptr, buf, l);
2687 }
2688 len -= l;
2689 buf += l;
2690 addr += l;
2691 }
2692}
2693
2694
8df1cd07
FB
2695/* warning: addr must be aligned */
2696uint32_t ldl_phys(target_phys_addr_t addr)
2697{
2698 int io_index;
2699 uint8_t *ptr;
2700 uint32_t val;
2701 unsigned long pd;
2702 PhysPageDesc *p;
2703
2704 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2705 if (!p) {
2706 pd = IO_MEM_UNASSIGNED;
2707 } else {
2708 pd = p->phys_offset;
2709 }
3b46e624 2710
5fafdf24 2711 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2712 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2713 /* I/O case */
2714 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2715 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2716 } else {
2717 /* RAM case */
5fafdf24 2718 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2719 (addr & ~TARGET_PAGE_MASK);
2720 val = ldl_p(ptr);
2721 }
2722 return val;
2723}
2724
84b7b8e7
FB
2725/* warning: addr must be aligned */
2726uint64_t ldq_phys(target_phys_addr_t addr)
2727{
2728 int io_index;
2729 uint8_t *ptr;
2730 uint64_t val;
2731 unsigned long pd;
2732 PhysPageDesc *p;
2733
2734 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2735 if (!p) {
2736 pd = IO_MEM_UNASSIGNED;
2737 } else {
2738 pd = p->phys_offset;
2739 }
3b46e624 2740
2a4188a3
FB
2741 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2742 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2743 /* I/O case */
2744 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2745#ifdef TARGET_WORDS_BIGENDIAN
2746 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2747 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2748#else
2749 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2750 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2751#endif
2752 } else {
2753 /* RAM case */
5fafdf24 2754 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2755 (addr & ~TARGET_PAGE_MASK);
2756 val = ldq_p(ptr);
2757 }
2758 return val;
2759}
2760
aab33094
FB
2761/* XXX: optimize */
2762uint32_t ldub_phys(target_phys_addr_t addr)
2763{
2764 uint8_t val;
2765 cpu_physical_memory_read(addr, &val, 1);
2766 return val;
2767}
2768
2769/* XXX: optimize */
2770uint32_t lduw_phys(target_phys_addr_t addr)
2771{
2772 uint16_t val;
2773 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2774 return tswap16(val);
2775}
2776
8df1cd07
FB
2777/* warning: addr must be aligned. The ram page is not marked as dirty
2778 and the code inside is not invalidated. It is useful if the dirty
2779 bits are used to track modified PTEs */
2780void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2781{
2782 int io_index;
2783 uint8_t *ptr;
2784 unsigned long pd;
2785 PhysPageDesc *p;
2786
2787 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2788 if (!p) {
2789 pd = IO_MEM_UNASSIGNED;
2790 } else {
2791 pd = p->phys_offset;
2792 }
3b46e624 2793
3a7d929e 2794 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2795 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2796 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2797 } else {
5fafdf24 2798 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2799 (addr & ~TARGET_PAGE_MASK);
2800 stl_p(ptr, val);
2801 }
2802}
2803
bc98a7ef
JM
2804void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2805{
2806 int io_index;
2807 uint8_t *ptr;
2808 unsigned long pd;
2809 PhysPageDesc *p;
2810
2811 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2812 if (!p) {
2813 pd = IO_MEM_UNASSIGNED;
2814 } else {
2815 pd = p->phys_offset;
2816 }
3b46e624 2817
bc98a7ef
JM
2818 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2819 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2820#ifdef TARGET_WORDS_BIGENDIAN
2821 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2823#else
2824 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2825 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2826#endif
2827 } else {
5fafdf24 2828 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2829 (addr & ~TARGET_PAGE_MASK);
2830 stq_p(ptr, val);
2831 }
2832}
2833
8df1cd07 2834/* warning: addr must be aligned */
8df1cd07
FB
2835void stl_phys(target_phys_addr_t addr, uint32_t val)
2836{
2837 int io_index;
2838 uint8_t *ptr;
2839 unsigned long pd;
2840 PhysPageDesc *p;
2841
2842 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2843 if (!p) {
2844 pd = IO_MEM_UNASSIGNED;
2845 } else {
2846 pd = p->phys_offset;
2847 }
3b46e624 2848
3a7d929e 2849 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2850 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2851 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2852 } else {
2853 unsigned long addr1;
2854 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2855 /* RAM case */
2856 ptr = phys_ram_base + addr1;
2857 stl_p(ptr, val);
3a7d929e
FB
2858 if (!cpu_physical_memory_is_dirty(addr1)) {
2859 /* invalidate code */
2860 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2861 /* set dirty bit */
f23db169
FB
2862 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2863 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2864 }
8df1cd07
FB
2865 }
2866}
2867
aab33094
FB
2868/* XXX: optimize */
2869void stb_phys(target_phys_addr_t addr, uint32_t val)
2870{
2871 uint8_t v = val;
2872 cpu_physical_memory_write(addr, &v, 1);
2873}
2874
2875/* XXX: optimize */
2876void stw_phys(target_phys_addr_t addr, uint32_t val)
2877{
2878 uint16_t v = tswap16(val);
2879 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2880}
2881
2882/* XXX: optimize */
2883void stq_phys(target_phys_addr_t addr, uint64_t val)
2884{
2885 val = tswap64(val);
2886 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2887}
2888
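/* Illustrative sketch (not part of the original file): using the aligned
   load/store helpers above to walk a made-up two-level page table kept in
   guest physical memory. stl_phys_notdirty() is used to update the PTE
   without marking the RAM page dirty, as described above; the bit layout
   is invented for the example. */
#if 0
static uint32_t example_walk_pte(target_phys_addr_t pgd_base, uint32_t vaddr)
{
    target_phys_addr_t pde_addr, pte_addr;
    uint32_t pde, pte;

    pde_addr = pgd_base + ((vaddr >> 22) * 4);
    pde = ldl_phys(pde_addr);                 /* aligned 32 bit load */

    pte_addr = (pde & ~0xfff) + (((vaddr >> 12) & 0x3ff) * 4);
    pte = ldl_phys(pte_addr);

    /* set an "accessed" bit without flagging the page as dirty RAM */
    stl_phys_notdirty(pte_addr, pte | 0x20);

    return pte;
}
#endif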
13eb76e0
FB
2889#endif
2890
2891/* virtual memory access for debug */
5fafdf24 2892int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2893 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2894{
2895 int l;
9b3c35e0
JM
2896 target_phys_addr_t phys_addr;
2897 target_ulong page;
13eb76e0
FB
2898
2899 while (len > 0) {
2900 page = addr & TARGET_PAGE_MASK;
2901 phys_addr = cpu_get_phys_page_debug(env, page);
2902 /* if no physical page mapped, return an error */
2903 if (phys_addr == -1)
2904 return -1;
2905 l = (page + TARGET_PAGE_SIZE) - addr;
2906 if (l > len)
2907 l = len;
5fafdf24 2908 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2909 buf, l, is_write);
13eb76e0
FB
2910 len -= l;
2911 buf += l;
2912 addr += l;
2913 }
2914 return 0;
2915}
2916
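/* Illustrative sketch (not part of the original file): how a debugger stub
   might use cpu_memory_rw_debug() to read guest virtual memory. The buffer
   handling is made up for the example. */
#if 0
static int example_read_guest_string(CPUState *env, target_ulong addr,
                                     char *out, int max_len)
{
    if (max_len <= 0)
        return -1;
    if (cpu_memory_rw_debug(env, addr, (uint8_t *)out, max_len, 0) < 0)
        return -1;                 /* no physical page mapped */
    out[max_len - 1] = '\0';
    return 0;
}
#endif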
e3db7226
FB
2917void dump_exec_info(FILE *f,
2918 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2919{
2920 int i, target_code_size, max_target_code_size;
2921 int direct_jmp_count, direct_jmp2_count, cross_page;
2922 TranslationBlock *tb;
3b46e624 2923
e3db7226
FB
2924 target_code_size = 0;
2925 max_target_code_size = 0;
2926 cross_page = 0;
2927 direct_jmp_count = 0;
2928 direct_jmp2_count = 0;
2929 for(i = 0; i < nb_tbs; i++) {
2930 tb = &tbs[i];
2931 target_code_size += tb->size;
2932 if (tb->size > max_target_code_size)
2933 max_target_code_size = tb->size;
2934 if (tb->page_addr[1] != -1)
2935 cross_page++;
2936 if (tb->tb_next_offset[0] != 0xffff) {
2937 direct_jmp_count++;
2938 if (tb->tb_next_offset[1] != 0xffff) {
2939 direct_jmp2_count++;
2940 }
2941 }
2942 }
2943 /* XXX: avoid using doubles ? */
57fec1fe 2944 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2945 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2946 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2947 nb_tbs ? target_code_size / nb_tbs : 0,
2948 max_target_code_size);
5fafdf24 2949 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2950 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2951 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2952 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2953 cross_page,
e3db7226
FB
2954 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2955 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2956 direct_jmp_count,
e3db7226
FB
2957 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2958 direct_jmp2_count,
2959 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2960 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
2961 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2962 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2963 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
57fec1fe
FB
2964#ifdef CONFIG_PROFILER
2965 {
2966 int64_t tot;
2967 tot = dyngen_interm_time + dyngen_code_time;
2968 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2969 tot, tot / 2.4e9);
2970 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2971 dyngen_tb_count,
2972 dyngen_tb_count1 - dyngen_tb_count,
2973 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2974 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2975 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2976 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2977 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2978 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2979 dyngen_tb_count ?
2980 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2981 cpu_fprintf(f, "cycles/op %0.1f\n",
2982 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2983 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2984 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2985 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2986 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2987 if (tot == 0)
2988 tot = 1;
2989 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2990 (double)dyngen_interm_time / tot * 100.0);
2991 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2992 (double)dyngen_code_time / tot * 100.0);
2993 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2994 dyngen_restore_count);
2995 cpu_fprintf(f, " avg cycles %0.1f\n",
2996 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
2997 {
2998 extern void dump_op_count(void);
2999 dump_op_count();
3000 }
3001 }
3002#endif
e3db7226
FB
3003}
3004
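/* Illustrative sketch (not part of the original file): dump_exec_info()
   takes any printf-like callback; pointing it at stdio is enough for a
   quick dump of the translator statistics above. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif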
5fafdf24 3005#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3006
3007#define MMUSUFFIX _cmmu
3008#define GETPC() NULL
3009#define env cpu_single_env
b769d8fe 3010#define SOFTMMU_CODE_ACCESS
61382a50
FB
3011
3012#define SHIFT 0
3013#include "softmmu_template.h"
3014
3015#define SHIFT 1
3016#include "softmmu_template.h"
3017
3018#define SHIFT 2
3019#include "softmmu_template.h"
3020
3021#define SHIFT 3
3022#include "softmmu_template.h"
3023
3024#undef env
3025
3026#endif