/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
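
/* The page tables above are two-level: the top L1_BITS of a page
   index select an L1 slot, the low L2_BITS select an entry inside the
   L2 chunk that slot points to.  For example, on a 32-bit target with
   4 KB pages (TARGET_PAGE_BITS = 12) this gives L1_BITS = 10, so both
   levels hold 1024 entries and page index 0x12345 splits into L1 slot
   0x48 (0x12345 >> 10) and L2 entry 0x345 (0x12345 & 0x3ff). */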

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
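
/* subpage_t lets several I/O handlers share one physical page: each
   byte offset inside the page (see SUBPAGE_IDX) gets its own
   read/write function table, with one slot per access width, so a
   region smaller than TARGET_PAGE_SIZE can still be dispatched to the
   right device. */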

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
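
/* phys_offset packs two things into one word: for RAM pages the
   TARGET_PAGE_MASK bits give the page's offset inside phys_ram_base,
   while the low bits hold the io_index of the handler (IO_MEM_RAM,
   IO_MEM_ROM, IO_MEM_UNASSIGNED, ...), which is why callers split it
   with (pd & TARGET_PAGE_MASK) and (pd & ~TARGET_PAGE_MASK). */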

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
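
/* The per-page TB lists and the jump lists below tag their pointers:
   the low two bits of each stored TranslationBlock pointer encode
   which slot of the pointed-to TB continues the chain (0 or 1, since
   a TB can appear on two pages and has two direct-jump slots), and
   the value 2 marks the end of a circular jump list, i.e. the owning
   TB itself.  Hence the recurring "(long)tb & 3" / "(long)tb & ~3"
   unpacking in the functions that follow. */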

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
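
/* set_bits marks bits [start, start+len) in the byte array 'tab'.
   For example, set_bits(tab, 3, 7) sets bits 3..9: it ORs tab[0] with
   0xf8 (bits 3..7) and tab[1] with 0x03 (bits 8..9). */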

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
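
/* The code bitmap built above holds one bit per byte of the page; a
   set bit means some TB was translated from that byte.  The fast path
   below can therefore decide with a single mask test whether a small
   write actually touches translated code, instead of walking the
   page's TB list. */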

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
936 too many translation blocks or too much generated code. */
c27004ec 937TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
938{
939 TranslationBlock *tb;
fd6ce8f6 940
5fafdf24 941 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
fd6ce8f6 942 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 943 return NULL;
fd6ce8f6
FB
944 tb = &tbs[nb_tbs++];
945 tb->pc = pc;
b448f2f3 946 tb->cflags = 0;
d4e8164f
FB
947 return tb;
948}
949

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
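
/* tb_find_pc relies on tbs[] being filled in allocation order, so the
   tc_ptr values are sorted: the binary search below ends with m_max
   indexing the last TB whose generated code starts at or before
   tc_ptr, i.e. the TB containing that host PC. */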

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
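
/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
   cpu_log_items, and an unknown name makes the whole call return 0. */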

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
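
/* The softmmu TLB is a direct-mapped cache with CPU_TLB_SIZE entries
   per MMU mode: a virtual address selects entry
   (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), and the entry's
   addr_read/addr_write/addr_code fields are compared against the
   access address (-1 meaning invalid).  That is why flushing a single
   page only needs to check one index in each mode's table. */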

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
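
/* Dirty tracking works through the TLB: phys_ram_dirty keeps one byte
   of dirty flags per RAM page (CODE_DIRTY_FLAG among them, 0xff
   meaning fully dirty).  Clearing a flag remaps the page's write
   entries to IO_MEM_NOTDIRTY so the next store takes a slow path that
   can invalidate TBs and set the flag again, after which
   tlb_set_dirty() switches the entry back to fast IO_MEM_RAM writes. */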

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
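
/* In the TLB entries filled below, 'addend' is stored as
   host_page_address - vaddr, so the fast path can compute the host
   pointer for a guest access as simply vaddr + addend; for I/O pages
   the low bits of the entry hold the io_index instead of a RAM
   mapping. */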

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1767/* called from signal handler: invalidate the code and unprotect the
1768 page. Return TRUE if the fault was successfully handled. */
53a5960a 1769int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1770{
1771#if !defined(CONFIG_SOFTMMU)
1772 VirtPageDesc *vp;
1773
1774#if defined(DEBUG_TLB)
1775 printf("page_unprotect: addr=0x%08x\n", addr);
1776#endif
1777 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1778
1779 /* if it is not mapped, no need to worry here */
1780 if (addr >= MMAP_AREA_END)
1781 return 0;
9fa3e853
FB
1782 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1783 if (!vp)
1784 return 0;
1785 /* NOTE: in this case, valid_tag is _not_ tested as it
1786 validates only the code TLB */
1787 if (vp->valid_tag != virt_valid_tag)
1788 return 0;
1789 if (!(vp->prot & PAGE_WRITE))
1790 return 0;
1791#if defined(DEBUG_TLB)
5fafdf24 1792 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1793 addr, vp->phys_addr, vp->prot);
1794#endif
59817ccb
FB
1795 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1796 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1797 (unsigned long)addr, vp->prot);
d720b93d 1798 /* set the dirty bit */
0a962c02 1799 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1800 /* flush the code inside */
1801 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1802 return 1;
1803#else
1804 return 0;
1805#endif
33417e70
FB
1806}
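/* Illustrative sketch (hypothetical wrapper, not part of the original
   file): the expected caller of page_unprotect() is the host SIGSEGV
   handler, which passes the faulting guest address, the host PC and the
   signal ucontext, and resumes execution when the fault was only a
   write to a page made read-only for translated code. */
static int example_handle_write_fault(target_ulong fault_addr,
                                      unsigned long host_pc, void *puc)
{
    if (page_unprotect(fault_addr, host_pc, puc))
        return 1;   /* handled: code invalidated, page writable again */
    return 0;       /* genuine guest fault, must be delivered */
}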
1807
0124311e
FB
1808#else
1809
ee8b7021 1810void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1811{
1812}
1813
2e12669a 1814void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1815{
1816}
1817
5fafdf24
TS
1818int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1819 target_phys_addr_t paddr, int prot,
6ebbf390 1820 int mmu_idx, int is_softmmu)
9fa3e853
FB
1821{
1822 return 0;
1823}
0124311e 1824
9fa3e853
FB
1825/* dump memory mappings */
1826void page_dump(FILE *f)
33417e70 1827{
9fa3e853
FB
1828 unsigned long start, end;
1829 int i, j, prot, prot1;
1830 PageDesc *p;
33417e70 1831
9fa3e853
FB
1832 fprintf(f, "%-8s %-8s %-8s %s\n",
1833 "start", "end", "size", "prot");
1834 start = -1;
1835 end = -1;
1836 prot = 0;
1837 for(i = 0; i <= L1_SIZE; i++) {
1838 if (i < L1_SIZE)
1839 p = l1_map[i];
1840 else
1841 p = NULL;
1842 for(j = 0;j < L2_SIZE; j++) {
1843 if (!p)
1844 prot1 = 0;
1845 else
1846 prot1 = p[j].flags;
1847 if (prot1 != prot) {
1848 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1849 if (start != -1) {
1850 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1851 start, end, end - start,
9fa3e853
FB
1852 prot & PAGE_READ ? 'r' : '-',
1853 prot & PAGE_WRITE ? 'w' : '-',
1854 prot & PAGE_EXEC ? 'x' : '-');
1855 }
1856 if (prot1 != 0)
1857 start = end;
1858 else
1859 start = -1;
1860 prot = prot1;
1861 }
1862 if (!p)
1863 break;
1864 }
33417e70 1865 }
33417e70
FB
1866}
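/* Illustrative output of page_dump() (values are made up); each line
   covers one run of pages with identical protection:
     start    end      size     prot
     00008000-00088000 00080000 r-x
     40000000-40201000 00201000 rw-
*/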
1867
53a5960a 1868int page_get_flags(target_ulong address)
33417e70 1869{
9fa3e853
FB
1870 PageDesc *p;
1871
1872 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1873 if (!p)
9fa3e853
FB
1874 return 0;
1875 return p->flags;
1876}
1877
1878/* modify the flags of a page and invalidate the code if
1879 necessary. The flag PAGE_WRITE_ORG is set automatically
1880 depending on PAGE_WRITE */
53a5960a 1881void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1882{
1883 PageDesc *p;
53a5960a 1884 target_ulong addr;
9fa3e853
FB
1885
1886 start = start & TARGET_PAGE_MASK;
1887 end = TARGET_PAGE_ALIGN(end);
1888 if (flags & PAGE_WRITE)
1889 flags |= PAGE_WRITE_ORG;
1890 spin_lock(&tb_lock);
1891 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1892 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1893 /* if the write protection is set, then we invalidate the code
1894 inside */
5fafdf24 1895 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1896 (flags & PAGE_WRITE) &&
1897 p->first_tb) {
d720b93d 1898 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1899 }
1900 p->flags = flags;
1901 }
1902 spin_unlock(&tb_lock);
33417e70
FB
1903}
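/* Illustrative sketch (hypothetical values): the user-mode loader and
   mmap emulation are the typical callers, marking a freshly mapped
   guest region as present together with its access rights: */
static void example_map_guest_region(target_ulong start, target_ulong size)
{
    page_set_flags(start, start + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}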
1904
3d97b40b
TS
1905int page_check_range(target_ulong start, target_ulong len, int flags)
1906{
1907 PageDesc *p;
1908 target_ulong end;
1909 target_ulong addr;
1910
1911 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1912 start = start & TARGET_PAGE_MASK;
1913
1914 if (end < start)
1915 /* we've wrapped around */
1916 return -1;
1917 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1918 p = page_find(addr >> TARGET_PAGE_BITS);
1919 if (!p)
1920 return -1;
1921 if (!(p->flags & PAGE_VALID))
1922 return -1;
1923
dae3270c 1924 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1925 return -1;
dae3270c
FB
1926 if (flags & PAGE_WRITE) {
1927 if (!(p->flags & PAGE_WRITE_ORG))
1928 return -1;
1929 /* unprotect the page if it was put read-only because it
1930 contains translated code */
1931 if (!(p->flags & PAGE_WRITE)) {
1932 if (!page_unprotect(addr, 0, NULL))
1933 return -1;
1934 }
1935 return 0;
1936 }
3d97b40b
TS
1937 }
1938 return 0;
1939}
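/* Illustrative sketch (hypothetical helper): syscall emulation would
   verify a guest buffer with page_check_range() before copying from it,
   turning a negative result into -EFAULT: */
static int example_copy_from_guest(uint8_t *dst, target_ulong guest_src,
                                   int len)
{
    if (page_check_range(guest_src, len, PAGE_READ) < 0)
        return -1;  /* the syscall layer would report -EFAULT */
    cpu_physical_memory_rw(guest_src, dst, len, 0);
    return 0;
}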
1940
9fa3e853
FB
1941/* called from signal handler: invalidate the code and unprotect the
1942 page. Return TRUE if the fault was successfully handled. */
53a5960a 1943int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1944{
1945 unsigned int page_index, prot, pindex;
1946 PageDesc *p, *p1;
53a5960a 1947 target_ulong host_start, host_end, addr;
9fa3e853 1948
83fb7adf 1949 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1950 page_index = host_start >> TARGET_PAGE_BITS;
1951 p1 = page_find(page_index);
1952 if (!p1)
1953 return 0;
83fb7adf 1954 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1955 p = p1;
1956 prot = 0;
1957 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1958 prot |= p->flags;
1959 p++;
1960 }
1961 /* if the page was really writable, then we change its
1962 protection back to writable */
1963 if (prot & PAGE_WRITE_ORG) {
1964 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1965 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1966 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1967 (prot & PAGE_BITS) | PAGE_WRITE);
1968 p1[pindex].flags |= PAGE_WRITE;
1969 /* and since the content will be modified, we must invalidate
1970 the corresponding translated code. */
d720b93d 1971 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1972#ifdef DEBUG_TB_CHECK
1973 tb_invalidate_check(address);
1974#endif
1975 return 1;
1976 }
1977 }
1978 return 0;
1979}
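/* Worked example (illustrative numbers): with 4 KB target pages on a
   host using 16 KB pages, a fault at guest address 0x5000 gives
   host_start = 0x4000, ORs the flags of the four target pages covering
   0x4000-0x7fff, and pindex = (0x5000 - 0x4000) >> TARGET_PAGE_BITS = 1
   selects the one descriptor whose PAGE_WRITE bit is restored. */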
1980
6a00d601
FB
1981static inline void tlb_set_dirty(CPUState *env,
1982 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1983{
1984}
9fa3e853
FB
1985#endif /* defined(CONFIG_USER_ONLY) */
1986
db7b5426
BS
1987static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1988 int memory);
03875444 1989static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
db7b5426
BS
1990 int orig_memory);
1991#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1992 need_subpage) \
1993 do { \
1994 if (addr > start_addr) \
1995 start_addr2 = 0; \
1996 else { \
1997 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1998 if (start_addr2 > 0) \
1999 need_subpage = 1; \
2000 } \
2001 \
49e9fba2 2002 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2003 end_addr2 = TARGET_PAGE_SIZE - 1; \
2004 else { \
2005 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2006 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2007 need_subpage = 1; \
2008 } \
2009 } while (0)
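/* Worked example (illustrative numbers, 4 KB target pages): registering
   orig_size = 0x400 bytes at start_addr = 0x1000100 and expanding the
   macro for the page at addr = 0x1000000 yields
       start_addr2 = 0x1000100 & ~TARGET_PAGE_MASK = 0x100,
       end_addr2   = (0x1000100 + 0x400 - 1) & ~TARGET_PAGE_MASK = 0x4ff,
   and need_subpage = 1: the registration covers only bytes 0x100-0x4ff
   of the page, so a subpage container is required. */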
2010
33417e70
FB
2011/* register physical memory. 'size' must be a multiple of the target
2012 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2013 I/O memory page */
5fafdf24 2014void cpu_register_physical_memory(target_phys_addr_t start_addr,
03875444
AJ
2015 unsigned long size,
2016 unsigned long phys_offset)
33417e70 2017{
108c49b8 2018 target_phys_addr_t addr, end_addr;
92e873b9 2019 PhysPageDesc *p;
9d42037b 2020 CPUState *env;
03875444 2021 unsigned long orig_size = size;
db7b5426 2022 void *subpage;
33417e70 2023
5fd386f6 2024 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2025 end_addr = start_addr + (target_phys_addr_t)size;
2026 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2028 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
03875444 2029 unsigned long orig_memory = p->phys_offset;
db7b5426
BS
2030 target_phys_addr_t start_addr2, end_addr2;
2031 int need_subpage = 0;
2032
2033 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2034 need_subpage);
4254fab8 2035 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2036 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2037 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2038 &p->phys_offset, orig_memory);
2039 } else {
2040 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2041 >> IO_MEM_SHIFT];
2042 }
2043 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2044 } else {
2045 p->phys_offset = phys_offset;
2046 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2047 (phys_offset & IO_MEM_ROMD))
2048 phys_offset += TARGET_PAGE_SIZE;
2049 }
2050 } else {
2051 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2052 p->phys_offset = phys_offset;
2053 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2054 (phys_offset & IO_MEM_ROMD))
2055 phys_offset += TARGET_PAGE_SIZE;
2056 else {
2057 target_phys_addr_t start_addr2, end_addr2;
2058 int need_subpage = 0;
2059
2060 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2061 end_addr2, need_subpage);
2062
4254fab8 2063 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2064 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2065 &p->phys_offset, IO_MEM_UNASSIGNED);
2066 subpage_register(subpage, start_addr2, end_addr2,
2067 phys_offset);
2068 }
2069 }
2070 }
33417e70 2071 }
3b46e624 2072
9d42037b
FB
2073 /* since each CPU stores ram addresses in its TLB cache, we must
2074 reset the modified entries */
2075 /* XXX: slow! */
2076 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2077 tlb_flush(env, 1);
2078 }
33417e70
FB
2079}
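/* Illustrative sketch (hypothetical addresses): typical use by a board
   model.  Plain RAM carries an offset into phys_ram_base tagged
   IO_MEM_RAM; an MMIO range carries the index previously returned by
   cpu_register_io_memory() in the low bits of phys_offset. */
static void example_board_init(int my_mmio_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
    cpu_register_physical_memory(0x10000000, 0x1000, my_mmio_index);
}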
2080
ba863458 2081/* XXX: temporary until new memory mapping API */
03875444 2082uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2083{
2084 PhysPageDesc *p;
2085
2086 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2087 if (!p)
2088 return IO_MEM_UNASSIGNED;
2089 return p->phys_offset;
2090}
2091
e9a1ab19 2092/* XXX: better than nothing */
03875444 2093ram_addr_t qemu_ram_alloc(unsigned int size)
e9a1ab19
FB
2094{
2095 ram_addr_t addr;
7fb4fdcf 2096 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
03875444
AJ
2097 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2098 size, phys_ram_size);
e9a1ab19
FB
2099 abort();
2100 }
2101 addr = phys_ram_alloc_offset;
2102 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2103 return addr;
2104}
2105
2106void qemu_ram_free(ram_addr_t addr)
2107{
2108}
2109
a4193c8a 2110static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2111{
67d3b957 2112#ifdef DEBUG_UNASSIGNED
ab3d1727 2113 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2114#endif
2115#ifdef TARGET_SPARC
6c36d3fa 2116 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2117#elif TARGET_CRIS
2118 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2119#endif
33417e70
FB
2120 return 0;
2121}
2122
a4193c8a 2123static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2124{
67d3b957 2125#ifdef DEBUG_UNASSIGNED
ab3d1727 2126 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2127#endif
b4f0a316 2128#ifdef TARGET_SPARC
6c36d3fa 2129 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2130#elif TARGET_CRIS
2131 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2132#endif
33417e70
FB
2133}
2134
2135static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2136 unassigned_mem_readb,
2137 unassigned_mem_readb,
2138 unassigned_mem_readb,
2139};
2140
2141static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2142 unassigned_mem_writeb,
2143 unassigned_mem_writeb,
2144 unassigned_mem_writeb,
2145};
2146
3a7d929e 2147static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2148{
3a7d929e
FB
2149 unsigned long ram_addr;
2150 int dirty_flags;
2151 ram_addr = addr - (unsigned long)phys_ram_base;
2152 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2153 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2154#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2155 tb_invalidate_phys_page_fast(ram_addr, 1);
2156 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2157#endif
3a7d929e 2158 }
c27004ec 2159 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2160#ifdef USE_KQEMU
2161 if (cpu_single_env->kqemu_enabled &&
2162 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2163 kqemu_modify_page(cpu_single_env, ram_addr);
2164#endif
f23db169
FB
2165 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2166 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2167 /* we remove the notdirty callback only if the code has been
2168 flushed */
2169 if (dirty_flags == 0xff)
6a00d601 2170 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2171}
2172
3a7d929e 2173static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2174{
3a7d929e
FB
2175 unsigned long ram_addr;
2176 int dirty_flags;
2177 ram_addr = addr - (unsigned long)phys_ram_base;
2178 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2179 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2180#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2181 tb_invalidate_phys_page_fast(ram_addr, 2);
2182 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2183#endif
3a7d929e 2184 }
c27004ec 2185 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2186#ifdef USE_KQEMU
2187 if (cpu_single_env->kqemu_enabled &&
2188 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2189 kqemu_modify_page(cpu_single_env, ram_addr);
2190#endif
f23db169
FB
2191 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2192 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2193 /* we remove the notdirty callback only if the code has been
2194 flushed */
2195 if (dirty_flags == 0xff)
6a00d601 2196 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2197}
2198
3a7d929e 2199static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2200{
3a7d929e
FB
2201 unsigned long ram_addr;
2202 int dirty_flags;
2203 ram_addr = addr - (unsigned long)phys_ram_base;
2204 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2205 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2206#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2207 tb_invalidate_phys_page_fast(ram_addr, 4);
2208 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2209#endif
3a7d929e 2210 }
c27004ec 2211 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2212#ifdef USE_KQEMU
2213 if (cpu_single_env->kqemu_enabled &&
2214 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2215 kqemu_modify_page(cpu_single_env, ram_addr);
2216#endif
f23db169
FB
2217 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2218 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2219 /* we remove the notdirty callback only if the code has been
2220 flushed */
2221 if (dirty_flags == 0xff)
6a00d601 2222 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2223}
2224
3a7d929e 2225static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2226 NULL, /* never used */
2227 NULL, /* never used */
2228 NULL, /* never used */
2229};
2230
1ccde1cb
FB
2231static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2232 notdirty_mem_writeb,
2233 notdirty_mem_writew,
2234 notdirty_mem_writel,
2235};
2236
6658ffb8
PB
2237#if defined(CONFIG_SOFTMMU)
2238/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2239 so these check for a hit then pass through to the normal out-of-line
2240 phys routines. */
2241static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2242{
2243 return ldub_phys(addr);
2244}
2245
2246static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2247{
2248 return lduw_phys(addr);
2249}
2250
2251static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2252{
2253 return ldl_phys(addr);
2254}
2255
2256/* Generate a debug exception if a watchpoint has been hit.
2257 Returns the real physical address of the access. addr will be a host
d79acba4 2258 address in case of a RAM location. */
6658ffb8
PB
2259static target_ulong check_watchpoint(target_phys_addr_t addr)
2260{
2261 CPUState *env = cpu_single_env;
2262 target_ulong watch;
2263 target_ulong retaddr;
2264 int i;
2265
2266 retaddr = addr;
2267 for (i = 0; i < env->nb_watchpoints; i++) {
2268 watch = env->watchpoint[i].vaddr;
2269 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2270 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2271 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2272 cpu_single_env->watchpoint_hit = i + 1;
2273 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2274 break;
2275 }
2276 }
2277 }
2278 return retaddr;
2279}
2280
2281static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2282 uint32_t val)
2283{
2284 addr = check_watchpoint(addr);
2285 stb_phys(addr, val);
2286}
2287
2288static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2289 uint32_t val)
2290{
2291 addr = check_watchpoint(addr);
2292 stw_phys(addr, val);
2293}
2294
2295static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2296 uint32_t val)
2297{
2298 addr = check_watchpoint(addr);
2299 stl_phys(addr, val);
2300}
2301
2302static CPUReadMemoryFunc *watch_mem_read[3] = {
2303 watch_mem_readb,
2304 watch_mem_readw,
2305 watch_mem_readl,
2306};
2307
2308static CPUWriteMemoryFunc *watch_mem_write[3] = {
2309 watch_mem_writeb,
2310 watch_mem_writew,
2311 watch_mem_writel,
2312};
2313#endif
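/* Illustrative sketch: the routing above is armed when a debugger
   registers a watchpoint; conceptually (hypothetical wrapper around the
   cpu_watchpoint_insert() helper used by the gdb stub): */
static void example_watch_address(CPUState *env, target_ulong vaddr)
{
    /* from now on, loads/stores touching vaddr's page go through
       io_mem_watch and are screened by check_watchpoint() */
    cpu_watchpoint_insert(env, vaddr);
}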
2314
db7b5426
BS
2315static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2316 unsigned int len)
2317{
db7b5426
BS
2318 uint32_t ret;
2319 unsigned int idx;
2320
2321 idx = SUBPAGE_IDX(addr - mmio->base);
2322#if defined(DEBUG_SUBPAGE)
2323 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2324 mmio, len, addr, idx);
2325#endif
3ee89922 2326 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2327
2328 return ret;
2329}
2330
2331static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2332 uint32_t value, unsigned int len)
2333{
db7b5426
BS
2334 unsigned int idx;
2335
2336 idx = SUBPAGE_IDX(addr - mmio->base);
2337#if defined(DEBUG_SUBPAGE)
2338 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2339 mmio, len, addr, idx, value);
2340#endif
3ee89922 2341 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2342}
2343
2344static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2345{
2346#if defined(DEBUG_SUBPAGE)
2347 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2348#endif
2349
2350 return subpage_readlen(opaque, addr, 0);
2351}
2352
2353static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2354 uint32_t value)
2355{
2356#if defined(DEBUG_SUBPAGE)
2357 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2358#endif
2359 subpage_writelen(opaque, addr, value, 0);
2360}
2361
2362static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2363{
2364#if defined(DEBUG_SUBPAGE)
2365 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2366#endif
2367
2368 return subpage_readlen(opaque, addr, 1);
2369}
2370
2371static void subpage_writew (void *opaque, target_phys_addr_t addr,
2372 uint32_t value)
2373{
2374#if defined(DEBUG_SUBPAGE)
2375 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2376#endif
2377 subpage_writelen(opaque, addr, value, 1);
2378}
2379
2380static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2381{
2382#if defined(DEBUG_SUBPAGE)
2383 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2384#endif
2385
2386 return subpage_readlen(opaque, addr, 2);
2387}
2388
2389static void subpage_writel (void *opaque,
2390 target_phys_addr_t addr, uint32_t value)
2391{
2392#if defined(DEBUG_SUBPAGE)
2393 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2394#endif
2395 subpage_writelen(opaque, addr, value, 2);
2396}
2397
2398static CPUReadMemoryFunc *subpage_read[] = {
2399 &subpage_readb,
2400 &subpage_readw,
2401 &subpage_readl,
2402};
2403
2404static CPUWriteMemoryFunc *subpage_write[] = {
2405 &subpage_writeb,
2406 &subpage_writew,
2407 &subpage_writel,
2408};
2409
2410static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2411 int memory)
2412{
2413 int idx, eidx;
4254fab8 2414 unsigned int i;
db7b5426
BS
2415
2416 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2417 return -1;
2418 idx = SUBPAGE_IDX(start);
2419 eidx = SUBPAGE_IDX(end);
2420#if defined(DEBUG_SUBPAGE)
2421 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2422 mmio, start, end, idx, eidx, memory);
2423#endif
2424 memory >>= IO_MEM_SHIFT;
2425 for (; idx <= eidx; idx++) {
4254fab8 2426 for (i = 0; i < 4; i++) {
3ee89922
BS
2427 if (io_mem_read[memory][i]) {
2428 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2429 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2430 }
2431 if (io_mem_write[memory][i]) {
2432 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2433 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2434 }
4254fab8 2435 }
db7b5426
BS
2436 }
2437
2438 return 0;
2439}
2440
03875444 2441static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
db7b5426
BS
2442 int orig_memory)
2443{
2444 subpage_t *mmio;
2445 int subpage_memory;
2446
2447 mmio = qemu_mallocz(sizeof(subpage_t));
2448 if (mmio != NULL) {
2449 mmio->base = base;
2450 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2451#if defined(DEBUG_SUBPAGE)
2452 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2453 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2454#endif
2455 *phys = subpage_memory | IO_MEM_SUBPAGE;
2456 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2457 }
2458
2459 return mmio;
2460}
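/* Illustrative scenario (hypothetical addresses and indexes): two small
   device windows inside one 4 KB physical page.  Each registration below
   covers less than a page, so cpu_register_physical_memory() installs a
   subpage via subpage_init() and routes the two ranges to their
   respective handlers. */
static void example_shared_page(int uart_index, int timer_index)
{
    cpu_register_physical_memory(0x10000000, 0x100, uart_index);
    cpu_register_physical_memory(0x10000800, 0x100, timer_index);
}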
2461
33417e70
FB
2462static void io_mem_init(void)
2463{
3a7d929e 2464 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2465 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2466 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2467 io_mem_nb = 5;
2468
6658ffb8
PB
2469#if defined(CONFIG_SOFTMMU)
2470 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2471 watch_mem_write, NULL);
2472#endif
1ccde1cb 2473 /* alloc dirty bits array */
0a962c02 2474 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2475 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2476}
2477
2478/* mem_read and mem_write are arrays of functions containing the
2479 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2480 2). Functions can be omitted with a NULL function pointer. The
2481 registered functions may be modified dynamically later.
2482 If io_index is non-zero, the corresponding I/O zone is
2483 modified. If it is zero, a new I/O zone is allocated. The return
4254fab8
BS
2484 value can be used with cpu_register_physical_memory(); -1 is
2485 returned on error. */
33417e70
FB
2486int cpu_register_io_memory(int io_index,
2487 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2488 CPUWriteMemoryFunc **mem_write,
2489 void *opaque)
33417e70 2490{
4254fab8 2491 int i, subwidth = 0;
33417e70
FB
2492
2493 if (io_index <= 0) {
b5ff1b31 2494 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2495 return -1;
2496 io_index = io_mem_nb++;
2497 } else {
2498 if (io_index >= IO_MEM_NB_ENTRIES)
2499 return -1;
2500 }
b5ff1b31 2501
33417e70 2502 for(i = 0;i < 3; i++) {
4254fab8
BS
2503 if (!mem_read[i] || !mem_write[i])
2504 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2505 io_mem_read[io_index][i] = mem_read[i];
2506 io_mem_write[io_index][i] = mem_write[i];
2507 }
a4193c8a 2508 io_mem_opaque[io_index] = opaque;
4254fab8 2509 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2510}
61382a50 2511
8926b517
FB
2512CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2513{
2514 return io_mem_write[io_index >> IO_MEM_SHIFT];
2515}
2516
2517CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2518{
2519 return io_mem_read[io_index >> IO_MEM_SHIFT];
2520}
2521
13eb76e0
FB
2522/* physical memory access (slow version, mainly for debug) */
2523#if defined(CONFIG_USER_ONLY)
5fafdf24 2524void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2525 int len, int is_write)
2526{
2527 int l, flags;
2528 target_ulong page;
53a5960a 2529 void * p;
13eb76e0
FB
2530
2531 while (len > 0) {
2532 page = addr & TARGET_PAGE_MASK;
2533 l = (page + TARGET_PAGE_SIZE) - addr;
2534 if (l > len)
2535 l = len;
2536 flags = page_get_flags(page);
2537 if (!(flags & PAGE_VALID))
2538 return;
2539 if (is_write) {
2540 if (!(flags & PAGE_WRITE))
2541 return;
579a97f7
FB
2542 /* XXX: this code should not depend on lock_user */
2543 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2544 /* FIXME - should this return an error rather than just fail? */
2545 return;
53a5960a
PB
2546 memcpy(p, buf, len);
2547 unlock_user(p, addr, len);
13eb76e0
FB
2548 } else {
2549 if (!(flags & PAGE_READ))
2550 return;
579a97f7
FB
2551 /* XXX: this code should not depend on lock_user */
2552 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2553 /* FIXME - should this return an error rather than just fail? */
2554 return;
53a5960a
PB
2555 memcpy(buf, p, len);
2556 unlock_user(p, addr, 0);
13eb76e0
FB
2557 }
2558 len -= l;
2559 buf += l;
2560 addr += l;
2561 }
2562}
8df1cd07 2563
13eb76e0 2564#else
5fafdf24 2565void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2566 int len, int is_write)
2567{
2568 int l, io_index;
2569 uint8_t *ptr;
2570 uint32_t val;
2e12669a
FB
2571 target_phys_addr_t page;
2572 unsigned long pd;
92e873b9 2573 PhysPageDesc *p;
3b46e624 2574
13eb76e0
FB
2575 while (len > 0) {
2576 page = addr & TARGET_PAGE_MASK;
2577 l = (page + TARGET_PAGE_SIZE) - addr;
2578 if (l > len)
2579 l = len;
92e873b9 2580 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2581 if (!p) {
2582 pd = IO_MEM_UNASSIGNED;
2583 } else {
2584 pd = p->phys_offset;
2585 }
3b46e624 2586
13eb76e0 2587 if (is_write) {
3a7d929e 2588 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2589 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2590 /* XXX: could force cpu_single_env to NULL to avoid
2591 potential bugs */
13eb76e0 2592 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2593 /* 32 bit write access */
c27004ec 2594 val = ldl_p(buf);
a4193c8a 2595 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2596 l = 4;
2597 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2598 /* 16 bit write access */
c27004ec 2599 val = lduw_p(buf);
a4193c8a 2600 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2601 l = 2;
2602 } else {
1c213d19 2603 /* 8 bit write access */
c27004ec 2604 val = ldub_p(buf);
a4193c8a 2605 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2606 l = 1;
2607 }
2608 } else {
b448f2f3
FB
2609 unsigned long addr1;
2610 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2611 /* RAM case */
b448f2f3 2612 ptr = phys_ram_base + addr1;
13eb76e0 2613 memcpy(ptr, buf, l);
3a7d929e
FB
2614 if (!cpu_physical_memory_is_dirty(addr1)) {
2615 /* invalidate code */
2616 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2617 /* set dirty bit */
5fafdf24 2618 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2619 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2620 }
13eb76e0
FB
2621 }
2622 } else {
5fafdf24 2623 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2624 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2625 /* I/O case */
2626 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2627 if (l >= 4 && ((addr & 3) == 0)) {
2628 /* 32 bit read access */
a4193c8a 2629 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2630 stl_p(buf, val);
13eb76e0
FB
2631 l = 4;
2632 } else if (l >= 2 && ((addr & 1) == 0)) {
2633 /* 16 bit read access */
a4193c8a 2634 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2635 stw_p(buf, val);
13eb76e0
FB
2636 l = 2;
2637 } else {
1c213d19 2638 /* 8 bit read access */
a4193c8a 2639 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2640 stb_p(buf, val);
13eb76e0
FB
2641 l = 1;
2642 }
2643 } else {
2644 /* RAM case */
5fafdf24 2645 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2646 (addr & ~TARGET_PAGE_MASK);
2647 memcpy(buf, ptr, l);
2648 }
2649 }
2650 len -= l;
2651 buf += l;
2652 addr += l;
2653 }
2654}
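/* Illustrative sketch (hypothetical device model): most callers use the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
   cpu_physical_memory_rw(), e.g. a DMA engine fetching a descriptor from
   guest RAM: */
static void example_dma_fetch(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];
    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
    /* ... decode the descriptor, then write completion status back with
       cpu_physical_memory_write() */
}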
8df1cd07 2655
d0ecd2aa 2656/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2657void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2658 const uint8_t *buf, int len)
2659{
2660 int l;
2661 uint8_t *ptr;
2662 target_phys_addr_t page;
2663 unsigned long pd;
2664 PhysPageDesc *p;
3b46e624 2665
d0ecd2aa
FB
2666 while (len > 0) {
2667 page = addr & TARGET_PAGE_MASK;
2668 l = (page + TARGET_PAGE_SIZE) - addr;
2669 if (l > len)
2670 l = len;
2671 p = phys_page_find(page >> TARGET_PAGE_BITS);
2672 if (!p) {
2673 pd = IO_MEM_UNASSIGNED;
2674 } else {
2675 pd = p->phys_offset;
2676 }
3b46e624 2677
d0ecd2aa 2678 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2679 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2680 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2681 /* do nothing */
2682 } else {
2683 unsigned long addr1;
2684 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2685 /* ROM/RAM case */
2686 ptr = phys_ram_base + addr1;
2687 memcpy(ptr, buf, l);
2688 }
2689 len -= l;
2690 buf += l;
2691 addr += l;
2692 }
2693}
2694
2695
8df1cd07
FB
2696/* warning: addr must be aligned */
2697uint32_t ldl_phys(target_phys_addr_t addr)
2698{
2699 int io_index;
2700 uint8_t *ptr;
2701 uint32_t val;
2702 unsigned long pd;
2703 PhysPageDesc *p;
2704
2705 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2706 if (!p) {
2707 pd = IO_MEM_UNASSIGNED;
2708 } else {
2709 pd = p->phys_offset;
2710 }
3b46e624 2711
5fafdf24 2712 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2713 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2714 /* I/O case */
2715 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2716 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2717 } else {
2718 /* RAM case */
5fafdf24 2719 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2720 (addr & ~TARGET_PAGE_MASK);
2721 val = ldl_p(ptr);
2722 }
2723 return val;
2724}
2725
84b7b8e7
FB
2726/* warning: addr must be aligned */
2727uint64_t ldq_phys(target_phys_addr_t addr)
2728{
2729 int io_index;
2730 uint8_t *ptr;
2731 uint64_t val;
2732 unsigned long pd;
2733 PhysPageDesc *p;
2734
2735 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2736 if (!p) {
2737 pd = IO_MEM_UNASSIGNED;
2738 } else {
2739 pd = p->phys_offset;
2740 }
3b46e624 2741
2a4188a3
FB
2742 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2743 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2744 /* I/O case */
2745 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2746#ifdef TARGET_WORDS_BIGENDIAN
2747 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2748 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2749#else
2750 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2751 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2752#endif
2753 } else {
2754 /* RAM case */
5fafdf24 2755 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2756 (addr & ~TARGET_PAGE_MASK);
2757 val = ldq_p(ptr);
2758 }
2759 return val;
2760}
2761
aab33094
FB
2762/* XXX: optimize */
2763uint32_t ldub_phys(target_phys_addr_t addr)
2764{
2765 uint8_t val;
2766 cpu_physical_memory_read(addr, &val, 1);
2767 return val;
2768}
2769
2770/* XXX: optimize */
2771uint32_t lduw_phys(target_phys_addr_t addr)
2772{
2773 uint16_t val;
2774 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2775 return tswap16(val);
2776}
2777
8df1cd07
FB
2778/* warning: addr must be aligned. The RAM page is not marked as dirty
2779 and the code inside is not invalidated. It is useful if the dirty
2780 bits are used to track modified PTEs */
2781void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2782{
2783 int io_index;
2784 uint8_t *ptr;
2785 unsigned long pd;
2786 PhysPageDesc *p;
2787
2788 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2789 if (!p) {
2790 pd = IO_MEM_UNASSIGNED;
2791 } else {
2792 pd = p->phys_offset;
2793 }
3b46e624 2794
3a7d929e 2795 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2796 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2797 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2798 } else {
5fafdf24 2799 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2800 (addr & ~TARGET_PAGE_MASK);
2801 stl_p(ptr, val);
2802 }
2803}
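/* Illustrative sketch (hypothetical flag names): target MMU emulation is
   the intended user -- updating guest PTE accessed/dirty bits without
   marking the page that holds the PTE as dirty: */
#define MY_PTE_ACCESSED 0x20    /* hypothetical PTE flag bits */
#define MY_PTE_DIRTY    0x40
static void example_update_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | MY_PTE_ACCESSED | MY_PTE_DIRTY);
}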
2804
bc98a7ef
JM
2805void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2806{
2807 int io_index;
2808 uint8_t *ptr;
2809 unsigned long pd;
2810 PhysPageDesc *p;
2811
2812 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2813 if (!p) {
2814 pd = IO_MEM_UNASSIGNED;
2815 } else {
2816 pd = p->phys_offset;
2817 }
3b46e624 2818
bc98a7ef
JM
2819 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2820 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2821#ifdef TARGET_WORDS_BIGENDIAN
2822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2823 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2824#else
2825 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2827#endif
2828 } else {
5fafdf24 2829 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2830 (addr & ~TARGET_PAGE_MASK);
2831 stq_p(ptr, val);
2832 }
2833}
2834
8df1cd07 2835/* warning: addr must be aligned */
8df1cd07
FB
2836void stl_phys(target_phys_addr_t addr, uint32_t val)
2837{
2838 int io_index;
2839 uint8_t *ptr;
2840 unsigned long pd;
2841 PhysPageDesc *p;
2842
2843 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2844 if (!p) {
2845 pd = IO_MEM_UNASSIGNED;
2846 } else {
2847 pd = p->phys_offset;
2848 }
3b46e624 2849
3a7d929e 2850 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2851 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2852 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2853 } else {
2854 unsigned long addr1;
2855 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2856 /* RAM case */
2857 ptr = phys_ram_base + addr1;
2858 stl_p(ptr, val);
3a7d929e
FB
2859 if (!cpu_physical_memory_is_dirty(addr1)) {
2860 /* invalidate code */
2861 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2862 /* set dirty bit */
f23db169
FB
2863 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2864 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2865 }
8df1cd07
FB
2866 }
2867}
2868
aab33094
FB
2869/* XXX: optimize */
2870void stb_phys(target_phys_addr_t addr, uint32_t val)
2871{
2872 uint8_t v = val;
2873 cpu_physical_memory_write(addr, &v, 1);
2874}
2875
2876/* XXX: optimize */
2877void stw_phys(target_phys_addr_t addr, uint32_t val)
2878{
2879 uint16_t v = tswap16(val);
2880 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2881}
2882
2883/* XXX: optimize */
2884void stq_phys(target_phys_addr_t addr, uint64_t val)
2885{
2886 val = tswap64(val);
2887 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2888}
2889
13eb76e0
FB
2890#endif
2891
2892/* virtual memory access for debug */
5fafdf24 2893int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2894 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2895{
2896 int l;
9b3c35e0
JM
2897 target_phys_addr_t phys_addr;
2898 target_ulong page;
13eb76e0
FB
2899
2900 while (len > 0) {
2901 page = addr & TARGET_PAGE_MASK;
2902 phys_addr = cpu_get_phys_page_debug(env, page);
2903 /* if no physical page mapped, return an error */
2904 if (phys_addr == -1)
2905 return -1;
2906 l = (page + TARGET_PAGE_SIZE) - addr;
2907 if (l > len)
2908 l = len;
5fafdf24 2909 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2910 buf, l, is_write);
13eb76e0
FB
2911 len -= l;
2912 buf += l;
2913 addr += l;
2914 }
2915 return 0;
2916}
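/* Illustrative sketch: the gdb stub reads guest virtual memory through
   this helper, which translates page by page with
   cpu_get_phys_page_debug(): */
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}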
2917
e3db7226
FB
2918void dump_exec_info(FILE *f,
2919 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2920{
2921 int i, target_code_size, max_target_code_size;
2922 int direct_jmp_count, direct_jmp2_count, cross_page;
2923 TranslationBlock *tb;
3b46e624 2924
e3db7226
FB
2925 target_code_size = 0;
2926 max_target_code_size = 0;
2927 cross_page = 0;
2928 direct_jmp_count = 0;
2929 direct_jmp2_count = 0;
2930 for(i = 0; i < nb_tbs; i++) {
2931 tb = &tbs[i];
2932 target_code_size += tb->size;
2933 if (tb->size > max_target_code_size)
2934 max_target_code_size = tb->size;
2935 if (tb->page_addr[1] != -1)
2936 cross_page++;
2937 if (tb->tb_next_offset[0] != 0xffff) {
2938 direct_jmp_count++;
2939 if (tb->tb_next_offset[1] != 0xffff) {
2940 direct_jmp2_count++;
2941 }
2942 }
2943 }
2944 /* XXX: avoid using doubles? */
57fec1fe 2945 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2946 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2947 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2948 nb_tbs ? target_code_size / nb_tbs : 0,
2949 max_target_code_size);
5fafdf24 2950 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2951 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2952 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2953 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2954 cross_page,
e3db7226
FB
2955 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2956 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2957 direct_jmp_count,
e3db7226
FB
2958 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2959 direct_jmp2_count,
2960 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2961 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
2962 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2963 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2964 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
57fec1fe
FB
2965#ifdef CONFIG_PROFILER
2966 {
2967 int64_t tot;
2968 tot = dyngen_interm_time + dyngen_code_time;
2969 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2970 tot, tot / 2.4e9);
2971 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2972 dyngen_tb_count,
2973 dyngen_tb_count1 - dyngen_tb_count,
2974 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2975 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2976 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2977 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2978 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2979 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2980 dyngen_tb_count ?
2981 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2982 cpu_fprintf(f, "cycles/op %0.1f\n",
2983 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2984 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2985 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2986 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2987 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2988 if (tot == 0)
2989 tot = 1;
2990 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2991 (double)dyngen_interm_time / tot * 100.0);
2992 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2993 (double)dyngen_code_time / tot * 100.0);
2994 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2995 dyngen_restore_count);
2996 cpu_fprintf(f, " avg cycles %0.1f\n",
2997 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
2998 {
2999 extern void dump_op_count(void);
3000 dump_op_count();
3001 }
3002 }
3003#endif
e3db7226
FB
3004}
3005
5fafdf24 3006#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3007
3008#define MMUSUFFIX _cmmu
3009#define GETPC() NULL
3010#define env cpu_single_env
b769d8fe 3011#define SOFTMMU_CODE_ACCESS
61382a50
FB
3012
3013#define SHIFT 0
3014#include "softmmu_template.h"
3015
3016#define SHIFT 1
3017#include "softmmu_template.h"
3018
3019#define SHIFT 2
3020#include "softmmu_template.h"
3021
3022#define SHIFT 3
3023#include "softmmu_template.h"
3024
3025#undef env
3026
3027#endif