/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

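/* The page table is a two-level radix tree over page indexes: the top
 * L1_BITS of (addr >> TARGET_PAGE_BITS) select an entry in l1_map, which
 * points to an array of L2_SIZE PageDescs selected by the low L2_BITS.
 * Illustrative example, assuming a 4 KB target page (TARGET_PAGE_BITS
 * == 12): L1_BITS = 32 - 10 - 12 = 10, so a 32-bit address splits into
 * 10 L1 bits, 10 L2 bits and a 12-bit page offset, and each level is a
 * 1024-entry table. */
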
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

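/* Note: unlike l1_map, the physical map gains an extra top level when
 * TARGET_PHYS_ADDR_SPACE_BITS > 32 (e.g. the 36-bit i386 PAE case
 * above), so wide physical spaces still resolve in a few dereferences.
 * New leaves are filled with IO_MEM_UNASSIGNED rather than left zeroed;
 * a plausible reason is that a zero phys_offset would otherwise be
 * indistinguishable from RAM page 0. */
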
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

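/* The page and jump lists below store extra state in the low bits of
 * the TranslationBlock pointers themselves: a link is (tb | n), where
 * n in {0, 1} says which of the TB's (at most two) physical pages or
 * jump slots the link belongs to, and (tb | 2) marks the owner at the
 * end of the circular jmp_first list.  Masking with ~3 recovers the
 * pointer; this relies on TranslationBlocks being at least 4-byte
 * aligned. */
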
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

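/* Worked example: set_bits(tab, 3, 7) must set bits 3..9.  start and
 * end fall in different bytes, so the else branch runs: tab[0] |= 0xf8
 * (bits 3-7), no full 0xff bytes are written since end1 == 8, and the
 * tail sets tab[1] |= ~(0xff << 2) == 0x03 (global bits 8-9). */
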
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

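/* After linking, a TB is reachable two ways: through tb_phys_hash
 * (keyed on the physical PC, used to find an existing translation) and
 * through the PageDesc list of every physical page it touches (used to
 * invalidate it when those pages are written).  A TB straddling a page
 * boundary is therefore registered on both pages, with the link's low
 * bits recording which page_addr[] slot the link belongs to. */
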
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

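/* The binary search above is valid because tb_alloc() hands out tbs[]
 * entries in order while code_gen_ptr only grows, so tc_ptr increases
 * monotonically with the array index.  Any host PC inside the code
 * buffer therefore maps back to the unique TB containing it; this is
 * what the precise-SMC code relies on to recover guest state from
 * env->mem_write_pc. */
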
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

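/* Usage example: cpu_str_to_log_mask("in_asm,cpu") yields
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" sets every mask in
 * cpu_log_items, and any unrecognized name makes the whole call return
 * 0 so the caller can reject the string. */
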
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

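/* Each software TLB entry keeps three comparators (addr_read,
 * addr_write, addr_code) so that loads, stores and instruction fetches
 * can be matched independently; the low bits of a comparator carry
 * flags such as TLB_INVALID_MASK or an I/O index, which is why matches
 * compare against the page-masked address with the flag bits masked in.
 * The direct-mapped index is (vaddr >> TARGET_PAGE_BITS) &
 * (CPU_TLB_SIZE - 1), with one table per MMU mode. */
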
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

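/* Dirty tracking sketch: phys_ram_dirty holds one flag byte per target
 * page.  Clearing a flag here downgrades any cached RAM write entry to
 * IO_MEM_NOTDIRTY, which forces the next store through the slow I/O
 * path; that slow path re-marks the page dirty so the fast IO_MEM_RAM
 * mapping can be restored (see tlb_set_dirty below).  CODE_DIRTY_FLAG
 * uses the same mechanism to catch self-modifying code. */
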
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

59817ccb
FB
1648/* add a new TLB entry. At most one entry for a given virtual address
1649 is permitted. Return 0 if OK or 2 if the page could not be mapped
1650 (can only happen in non SOFTMMU mode for I/O pages or pages
1651 conflicting with the host address space). */
5fafdf24
TS
1652int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1653 target_phys_addr_t paddr, int prot,
6ebbf390 1654 int mmu_idx, int is_softmmu)
9fa3e853 1655{
92e873b9 1656 PhysPageDesc *p;
4f2ac237 1657 unsigned long pd;
9fa3e853 1658 unsigned int index;
4f2ac237 1659 target_ulong address;
108c49b8 1660 target_phys_addr_t addend;
9fa3e853 1661 int ret;
84b7b8e7 1662 CPUTLBEntry *te;
6658ffb8 1663 int i;
9fa3e853 1664
92e873b9 1665 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1666 if (!p) {
1667 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1668 } else {
1669 pd = p->phys_offset;
9fa3e853
FB
1670 }
1671#if defined(DEBUG_TLB)
6ebbf390
JM
1672 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1673 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1674#endif
1675
1676 ret = 0;
1677#if !defined(CONFIG_SOFTMMU)
5fafdf24 1678 if (is_softmmu)
9fa3e853
FB
1679#endif
1680 {
2a4188a3 1681 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1682 /* IO memory case */
1683 address = vaddr | pd;
1684 addend = paddr;
1685 } else {
1686 /* standard memory */
1687 address = vaddr;
1688 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1689 }
6658ffb8
PB
1690
1691 /* Make accesses to pages with watchpoints go via the
1692 watchpoint trap routines. */
1693 for (i = 0; i < env->nb_watchpoints; i++) {
1694 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1695 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1696 env->watchpoint[i].addend = 0;
6658ffb8
PB
1697 address = vaddr | io_mem_watch;
1698 } else {
d79acba4
AZ
1699 env->watchpoint[i].addend = pd - paddr +
1700 (unsigned long) phys_ram_base;
6658ffb8
PB
1701 /* TODO: Figure out how to make read watchpoints coexist
1702 with code. */
1703 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1704 }
1705 }
1706 }
d79acba4 1707
90f18422 1708 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1709 addend -= vaddr;
6ebbf390 1710 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1711 te->addend = addend;
67b915a5 1712 if (prot & PAGE_READ) {
84b7b8e7
FB
1713 te->addr_read = address;
1714 } else {
1715 te->addr_read = -1;
1716 }
5c751e99
EI
1717
1718 if (te->addr_code != -1) {
1719 tlb_flush_jmp_cache(env, te->addr_code);
1720 }
84b7b8e7
FB
1721 if (prot & PAGE_EXEC) {
1722 te->addr_code = address;
9fa3e853 1723 } else {
84b7b8e7 1724 te->addr_code = -1;
9fa3e853 1725 }
67b915a5 1726 if (prot & PAGE_WRITE) {
5fafdf24 1727 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1728 (pd & IO_MEM_ROMD)) {
1729 /* write access calls the I/O callback */
5fafdf24 1730 te->addr_write = vaddr |
856074ec 1731 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1732 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1733 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1734 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1735 } else {
84b7b8e7 1736 te->addr_write = address;
9fa3e853
FB
1737 }
1738 } else {
84b7b8e7 1739 te->addr_write = -1;
9fa3e853
FB
1740 }
1741 }
1742#if !defined(CONFIG_SOFTMMU)
1743 else {
1744 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1745 /* IO access: no mapping is done as it will be handled by the
1746 soft MMU */
1747 if (!(env->hflags & HF_SOFTMMU_MASK))
1748 ret = 2;
1749 } else {
1750 void *map_addr;
59817ccb
FB
1751
1752 if (vaddr >= MMAP_AREA_END) {
1753 ret = 2;
1754 } else {
1755 if (prot & PROT_WRITE) {
5fafdf24 1756 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1757#if defined(TARGET_HAS_SMC) || 1
59817ccb 1758 first_tb ||
d720b93d 1759#endif
5fafdf24 1760 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1761 !cpu_physical_memory_is_dirty(pd))) {
1762 /* ROM: behave as if code were inside */
1763 /* if code is present, we only map read-only and save the
1764 original mapping */
1765 VirtPageDesc *vp;
3b46e624 1766
90f18422 1767 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1768 vp->phys_addr = pd;
1769 vp->prot = prot;
1770 vp->valid_tag = virt_valid_tag;
1771 prot &= ~PAGE_WRITE;
1772 }
1773 }
5fafdf24 1774 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1775 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1776 if (map_addr == MAP_FAILED) {
1777 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1778 paddr, vaddr);
9fa3e853 1779 }
9fa3e853
FB
1780 }
1781 }
1782 }
1783#endif
1784 return ret;
1785}
1786
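/* Illustrative sketch only (the real fast path is generated from
   softmmu_header.h): how a load consults the entry that
   tlb_set_page_exec() filled in above.  For a plain RAM page the
   precomputed addend turns the guest virtual address directly into a
   host pointer; any bits set below the page mask flag an I/O or
   watchpoint page and force the slow path. */
static inline int tlb_read_hit_example(CPUTLBEntry *te, target_ulong addr,
                                       uint8_t **hostp)
{
    if (te->addr_read == (addr & TARGET_PAGE_MASK)) {
        /* RAM hit: addend was set above to
           phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr */
        *hostp = (uint8_t *)(unsigned long)(addr + te->addend);
        return 1;
    }
    return 0;   /* TLB miss, I/O page or watchpoint: slow path */
}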
1787/* called from signal handler: invalidate the code and unprotect the
1788 page. Return TRUE if the fault was successfully handled. */
53a5960a 1789int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1790{
1791#if !defined(CONFIG_SOFTMMU)
1792 VirtPageDesc *vp;
1793
1794#if defined(DEBUG_TLB)
1795 printf("page_unprotect: addr=0x%08x\n", addr);
1796#endif
1797 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1798
1799 /* if it is not mapped, no need to worry here */
1800 if (addr >= MMAP_AREA_END)
1801 return 0;
9fa3e853
FB
1802 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1803 if (!vp)
1804 return 0;
1805 /* NOTE: valid_tag only validates the code TLB; a stale tag
1806 means this mapping is no longer ours, so there is nothing to fix */
1807 if (vp->valid_tag != virt_valid_tag)
1808 return 0;
1809 if (!(vp->prot & PAGE_WRITE))
1810 return 0;
1811#if defined(DEBUG_TLB)
5fafdf24 1812 printf("page_unprotect: addr=" TARGET_FMT_lx " phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1813 addr, vp->phys_addr, vp->prot);
1814#endif
59817ccb
FB
1815 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1816 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1817 (unsigned long)addr, vp->prot);
d720b93d 1818 /* set the dirty bit */
0a962c02 1819 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1820 /* flush the code inside */
1821 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1822 return 1;
1823#else
1824 return 0;
1825#endif
33417e70
FB
1826}
1827
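/* Minimal usage sketch (hypothetical wrapper; the real caller is the
   per-host cpu_signal_handler()): on a host SIGSEGV caused by a write
   to a page that was write-protected above, page_unprotect() is given
   a chance to fix things up before the fault is treated as a real
   guest exception. */
static int handle_write_fault_example(target_ulong addr,
                                      unsigned long pc, void *puc)
{
    if (page_unprotect(addr, pc, puc))
        return 1;   /* protection restored and code flushed: retry */
    return 0;       /* genuine fault: let the guest see it */
}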
0124311e
FB
1828#else
1829
ee8b7021 1830void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1831{
1832}
1833
2e12669a 1834void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1835{
1836}
1837
5fafdf24
TS
1838int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1839 target_phys_addr_t paddr, int prot,
6ebbf390 1840 int mmu_idx, int is_softmmu)
9fa3e853
FB
1841{
1842 return 0;
1843}
0124311e 1844
9fa3e853
FB
1845/* dump memory mappings */
1846void page_dump(FILE *f)
33417e70 1847{
9fa3e853
FB
1848 unsigned long start, end;
1849 int i, j, prot, prot1;
1850 PageDesc *p;
33417e70 1851
9fa3e853
FB
1852 fprintf(f, "%-8s %-8s %-8s %s\n",
1853 "start", "end", "size", "prot");
1854 start = -1;
1855 end = -1;
1856 prot = 0;
1857 for(i = 0; i <= L1_SIZE; i++) {
1858 if (i < L1_SIZE)
1859 p = l1_map[i];
1860 else
1861 p = NULL;
1862 for(j = 0;j < L2_SIZE; j++) {
1863 if (!p)
1864 prot1 = 0;
1865 else
1866 prot1 = p[j].flags;
1867 if (prot1 != prot) {
1868 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1869 if (start != -1) {
1870 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1871 start, end, end - start,
9fa3e853
FB
1872 prot & PAGE_READ ? 'r' : '-',
1873 prot & PAGE_WRITE ? 'w' : '-',
1874 prot & PAGE_EXEC ? 'x' : '-');
1875 }
1876 if (prot1 != 0)
1877 start = end;
1878 else
1879 start = -1;
1880 prot = prot1;
1881 }
1882 if (!p)
1883 break;
1884 }
33417e70 1885 }
33417e70
FB
1886}
1887
53a5960a 1888int page_get_flags(target_ulong address)
33417e70 1889{
9fa3e853
FB
1890 PageDesc *p;
1891
1892 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1893 if (!p)
9fa3e853
FB
1894 return 0;
1895 return p->flags;
1896}
1897
1898/* modify the flags of a page and invalidate the code if
1899 necessary. The flag PAGE_WRITE_ORG is set automatically
1900 depending on PAGE_WRITE */
53a5960a 1901void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1902{
1903 PageDesc *p;
53a5960a 1904 target_ulong addr;
9fa3e853
FB
1905
1906 start = start & TARGET_PAGE_MASK;
1907 end = TARGET_PAGE_ALIGN(end);
1908 if (flags & PAGE_WRITE)
1909 flags |= PAGE_WRITE_ORG;
1910 spin_lock(&tb_lock);
1911 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1912 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1913 /* if write protection is being removed from a page that
1914 contains translated code, we must invalidate that code */
5fafdf24 1915 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1916 (flags & PAGE_WRITE) &&
1917 p->first_tb) {
d720b93d 1918 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1919 }
1920 p->flags = flags;
1921 }
1922 spin_unlock(&tb_lock);
33417e70
FB
1923}
1924
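/* Usage sketch (simplified from the user-mode mmap() emulation, its
   main caller): after creating the host mapping for a guest region,
   mirror the protection into the page flags so page_check_range()
   and the self-modifying-code machinery see it. */
static void register_guest_mapping_example(target_ulong start,
                                           target_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}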
3d97b40b
TS
1925int page_check_range(target_ulong start, target_ulong len, int flags)
1926{
1927 PageDesc *p;
1928 target_ulong end;
1929 target_ulong addr;
1930
1931 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1932 start = start & TARGET_PAGE_MASK;
1933
1934 if( end < start )
1935 /* we've wrapped around */
1936 return -1;
1937 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1938 p = page_find(addr >> TARGET_PAGE_BITS);
1939 if( !p )
1940 return -1;
1941 if( !(p->flags & PAGE_VALID) )
1942 return -1;
1943
dae3270c 1944 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1945 return -1;
dae3270c
FB
1946 if (flags & PAGE_WRITE) {
1947 if (!(p->flags & PAGE_WRITE_ORG))
1948 return -1;
1949 /* unprotect the page if it was put read-only because it
1950 contains translated code */
1951 if (!(p->flags & PAGE_WRITE)) {
1952 if (!page_unprotect(addr, 0, NULL))
1953 return -1;
1954 }
1955 return 0;
1956 }
3d97b40b
TS
1957 }
1958 return 0;
1959}
1960
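/* Usage sketch (hypothetical helper in the style of the user-mode
   syscall access checks): validate a whole guest buffer before
   writing to it. */
static int guest_range_writable_example(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}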
9fa3e853
FB
1961/* called from signal handler: invalidate the code and unprotect the
1962 page. Return TRUE if the fault was successfully handled. */
53a5960a 1963int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1964{
1965 unsigned int page_index, prot, pindex;
1966 PageDesc *p, *p1;
53a5960a 1967 target_ulong host_start, host_end, addr;
9fa3e853 1968
83fb7adf 1969 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1970 page_index = host_start >> TARGET_PAGE_BITS;
1971 p1 = page_find(page_index);
1972 if (!p1)
1973 return 0;
83fb7adf 1974 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1975 p = p1;
1976 prot = 0;
1977 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1978 prot |= p->flags;
1979 p++;
1980 }
1981 /* if the page was really writable, then we change its
1982 protection back to writable */
1983 if (prot & PAGE_WRITE_ORG) {
1984 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1985 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1986 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1987 (prot & PAGE_BITS) | PAGE_WRITE);
1988 p1[pindex].flags |= PAGE_WRITE;
1989 /* and since the content will be modified, we must invalidate
1990 the corresponding translated code. */
d720b93d 1991 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1992#ifdef DEBUG_TB_CHECK
1993 tb_invalidate_check(address);
1994#endif
1995 return 1;
1996 }
1997 }
1998 return 0;
1999}
2000
6a00d601
FB
2001static inline void tlb_set_dirty(CPUState *env,
2002 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2003{
2004}
9fa3e853
FB
2005#endif /* defined(CONFIG_USER_ONLY) */
2006
db7b5426 2007static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2008 ram_addr_t memory);
2009static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2010 ram_addr_t orig_memory);
db7b5426
BS
2011#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2012 need_subpage) \
2013 do { \
2014 if (addr > start_addr) \
2015 start_addr2 = 0; \
2016 else { \
2017 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2018 if (start_addr2 > 0) \
2019 need_subpage = 1; \
2020 } \
2021 \
49e9fba2 2022 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2023 end_addr2 = TARGET_PAGE_SIZE - 1; \
2024 else { \
2025 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2026 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2027 need_subpage = 1; \
2028 } \
2029 } while (0)
2030
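/* Worked example for CHECK_SUBPAGE, assuming 4 KiB target pages:
   registering start_addr = 0x12800 with orig_size = 0x400 visits the
   page containing 0x12800 exactly once.  On that iteration
   addr == start_addr, so start_addr2 = 0x800 and end_addr2 = 0xbff,
   and need_subpage is set: only bytes [0x800, 0xbff] of the page get
   the new phys_offset, through the subpage machinery used below. */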
33417e70
FB
2031/* register physical memory. 'size' must be a multiple of the target
2032 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2033 io memory page */
5fafdf24 2034void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2035 ram_addr_t size,
2036 ram_addr_t phys_offset)
33417e70 2037{
108c49b8 2038 target_phys_addr_t addr, end_addr;
92e873b9 2039 PhysPageDesc *p;
9d42037b 2040 CPUState *env;
00f82b8a 2041 ram_addr_t orig_size = size;
db7b5426 2042 void *subpage;
33417e70 2043
5fd386f6 2044 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2045 end_addr = start_addr + (target_phys_addr_t)size;
2046 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2047 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2048 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2049 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2050 target_phys_addr_t start_addr2, end_addr2;
2051 int need_subpage = 0;
2052
2053 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2054 need_subpage);
4254fab8 2055 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2056 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2057 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2058 &p->phys_offset, orig_memory);
2059 } else {
2060 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2061 >> IO_MEM_SHIFT];
2062 }
2063 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2064 } else {
2065 p->phys_offset = phys_offset;
2066 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2067 (phys_offset & IO_MEM_ROMD))
2068 phys_offset += TARGET_PAGE_SIZE;
2069 }
2070 } else {
2071 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2072 p->phys_offset = phys_offset;
2073 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2074 (phys_offset & IO_MEM_ROMD))
2075 phys_offset += TARGET_PAGE_SIZE;
2076 else {
2077 target_phys_addr_t start_addr2, end_addr2;
2078 int need_subpage = 0;
2079
2080 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2081 end_addr2, need_subpage);
2082
4254fab8 2083 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2084 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2085 &p->phys_offset, IO_MEM_UNASSIGNED);
2086 subpage_register(subpage, start_addr2, end_addr2,
2087 phys_offset);
2088 }
2089 }
2090 }
33417e70 2091 }
3b46e624 2092
9d42037b
FB
2093 /* since each CPU stores ram addresses in its TLB cache, we must
2094 reset the modified entries */
2095 /* XXX: slow ! */
2096 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2097 tlb_flush(env, 1);
2098 }
33417e70
FB
2099}
2100
ba863458 2101/* XXX: temporary until new memory mapping API */
00f82b8a 2102ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2103{
2104 PhysPageDesc *p;
2105
2106 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2107 if (!p)
2108 return IO_MEM_UNASSIGNED;
2109 return p->phys_offset;
2110}
2111
e9a1ab19 2112/* XXX: better than nothing */
00f82b8a 2113ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2114{
2115 ram_addr_t addr;
7fb4fdcf 2116 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
00f82b8a 2117 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
03875444 2118 size, phys_ram_size);
e9a1ab19
FB
2119 abort();
2120 }
2121 addr = phys_ram_alloc_offset;
2122 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2123 return addr;
2124}
2125
2126void qemu_ram_free(ram_addr_t addr)
2127{
2128}
2129
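/* Usage sketch (simplified board code; the addresses are invented for
   illustration): carve guest RAM out of the host buffer with
   qemu_ram_alloc(), then map it with cpu_register_physical_memory().
   IO_MEM_RAM is zero, so plain RAM needs no extra flag bits; a ROM
   page gets IO_MEM_ROM or'ed into its offset. */
static void map_example_board_memory(ram_addr_t ram_size)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(TARGET_PAGE_SIZE);
    cpu_register_physical_memory(0xfffff000, TARGET_PAGE_SIZE,
                                 rom_offset | IO_MEM_ROM);
}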
a4193c8a 2130static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2131{
67d3b957 2132#ifdef DEBUG_UNASSIGNED
ab3d1727 2133 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2134#endif
2135#ifdef TARGET_SPARC
6c36d3fa 2136 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2137#elif TARGET_CRIS
2138 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2139#endif
33417e70
FB
2140 return 0;
2141}
2142
a4193c8a 2143static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2144{
67d3b957 2145#ifdef DEBUG_UNASSIGNED
ab3d1727 2146 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2147#endif
b4f0a316 2148#ifdef TARGET_SPARC
6c36d3fa 2149 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2150#elif TARGET_CRIS
2151 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2152#endif
33417e70
FB
2153}
2154
2155static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2156 unassigned_mem_readb,
2157 unassigned_mem_readb,
2158 unassigned_mem_readb,
2159};
2160
2161static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2162 unassigned_mem_writeb,
2163 unassigned_mem_writeb,
2164 unassigned_mem_writeb,
2165};
2166
3a7d929e 2167static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2168{
3a7d929e
FB
2169 unsigned long ram_addr;
2170 int dirty_flags;
2171 ram_addr = addr - (unsigned long)phys_ram_base;
2172 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2173 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2174#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2175 tb_invalidate_phys_page_fast(ram_addr, 1);
2176 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2177#endif
3a7d929e 2178 }
c27004ec 2179 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2180#ifdef USE_KQEMU
2181 if (cpu_single_env->kqemu_enabled &&
2182 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2183 kqemu_modify_page(cpu_single_env, ram_addr);
2184#endif
f23db169
FB
2185 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2186 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2187 /* we remove the notdirty callback only if the code has been
2188 flushed */
2189 if (dirty_flags == 0xff)
6a00d601 2190 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2191}
2192
3a7d929e 2193static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2194{
3a7d929e
FB
2195 unsigned long ram_addr;
2196 int dirty_flags;
2197 ram_addr = addr - (unsigned long)phys_ram_base;
2198 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2199 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2200#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2201 tb_invalidate_phys_page_fast(ram_addr, 2);
2202 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2203#endif
3a7d929e 2204 }
c27004ec 2205 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2206#ifdef USE_KQEMU
2207 if (cpu_single_env->kqemu_enabled &&
2208 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2209 kqemu_modify_page(cpu_single_env, ram_addr);
2210#endif
f23db169
FB
2211 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2212 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2213 /* we remove the notdirty callback only if the code has been
2214 flushed */
2215 if (dirty_flags == 0xff)
6a00d601 2216 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2217}
2218
3a7d929e 2219static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2220{
3a7d929e
FB
2221 unsigned long ram_addr;
2222 int dirty_flags;
2223 ram_addr = addr - (unsigned long)phys_ram_base;
2224 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2226#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2227 tb_invalidate_phys_page_fast(ram_addr, 4);
2228 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2229#endif
3a7d929e 2230 }
c27004ec 2231 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2232#ifdef USE_KQEMU
2233 if (cpu_single_env->kqemu_enabled &&
2234 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2235 kqemu_modify_page(cpu_single_env, ram_addr);
2236#endif
f23db169
FB
2237 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2238 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2239 /* we remove the notdirty callback only if the code has been
2240 flushed */
2241 if (dirty_flags == 0xff)
6a00d601 2242 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2243}
2244
3a7d929e 2245static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2246 NULL, /* never used */
2247 NULL, /* never used */
2248 NULL, /* never used */
2249};
2250
1ccde1cb
FB
2251static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2252 notdirty_mem_writeb,
2253 notdirty_mem_writew,
2254 notdirty_mem_writel,
2255};
2256
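/* Net effect of the three notdirty handlers above: the first store to
   a clean RAM page takes this slow path, invalidates any TBs on the
   page, and sets every dirty bit except CODE_DIRTY_FLAG (which the
   TB-invalidation path sets once no code remains there).  As soon as
   dirty_flags reaches 0xff the TLB entry is switched back to a plain
   RAM mapping and later stores use the fast path again. */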
6658ffb8
PB
2257#if defined(CONFIG_SOFTMMU)
2258/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2259 so these check for a hit then pass through to the normal out-of-line
2260 phys routines. */
2261static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2262{
2263 return ldub_phys(addr);
2264}
2265
2266static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2267{
2268 return lduw_phys(addr);
2269}
2270
2271static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2272{
2273 return ldl_phys(addr);
2274}
2275
2276/* Generate a debug exception if a watchpoint has been hit.
2277 Returns the real physical address of the access. addr will be a host
d79acba4 2278 address in case of a RAM location. */
6658ffb8
PB
2279static target_ulong check_watchpoint(target_phys_addr_t addr)
2280{
2281 CPUState *env = cpu_single_env;
2282 target_ulong watch;
2283 target_ulong retaddr;
2284 int i;
2285
2286 retaddr = addr;
2287 for (i = 0; i < env->nb_watchpoints; i++) {
2288 watch = env->watchpoint[i].vaddr;
2289 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2290 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2291 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2292 cpu_single_env->watchpoint_hit = i + 1;
2293 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2294 break;
2295 }
2296 }
2297 }
2298 return retaddr;
2299}
2300
2301static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2302 uint32_t val)
2303{
2304 addr = check_watchpoint(addr);
2305 stb_phys(addr, val);
2306}
2307
2308static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2309 uint32_t val)
2310{
2311 addr = check_watchpoint(addr);
2312 stw_phys(addr, val);
2313}
2314
2315static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2316 uint32_t val)
2317{
2318 addr = check_watchpoint(addr);
2319 stl_phys(addr, val);
2320}
2321
2322static CPUReadMemoryFunc *watch_mem_read[3] = {
2323 watch_mem_readb,
2324 watch_mem_readw,
2325 watch_mem_readl,
2326};
2327
2328static CPUWriteMemoryFunc *watch_mem_write[3] = {
2329 watch_mem_writeb,
2330 watch_mem_writew,
2331 watch_mem_writel,
2332};
2333#endif
2334
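/* How the TLB trick above fits together: tlb_set_page_exec() redirects
   any page containing a watchpoint to io_mem_watch, so ordinary guest
   loads and stores land in the watch_mem_* handlers instead of
   touching RAM directly.  check_watchpoint() raises
   CPU_INTERRUPT_DEBUG on an exact match and in every case rewrites
   the address so the access still completes through the normal phys
   routines. */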
db7b5426
BS
2335static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2336 unsigned int len)
2337{
db7b5426
BS
2338 uint32_t ret;
2339 unsigned int idx;
2340
2341 idx = SUBPAGE_IDX(addr - mmio->base);
2342#if defined(DEBUG_SUBPAGE)
2343 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2344 mmio, len, addr, idx);
2345#endif
3ee89922 2346 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2347
2348 return ret;
2349}
2350
2351static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2352 uint32_t value, unsigned int len)
2353{
db7b5426
BS
2354 unsigned int idx;
2355
2356 idx = SUBPAGE_IDX(addr - mmio->base);
2357#if defined(DEBUG_SUBPAGE)
2358 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2359 mmio, len, addr, idx, value);
2360#endif
3ee89922 2361 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2362}
2363
2364static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2365{
2366#if defined(DEBUG_SUBPAGE)
2367 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2368#endif
2369
2370 return subpage_readlen(opaque, addr, 0);
2371}
2372
2373static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2374 uint32_t value)
2375{
2376#if defined(DEBUG_SUBPAGE)
2377 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2378#endif
2379 subpage_writelen(opaque, addr, value, 0);
2380}
2381
2382static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2383{
2384#if defined(DEBUG_SUBPAGE)
2385 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2386#endif
2387
2388 return subpage_readlen(opaque, addr, 1);
2389}
2390
2391static void subpage_writew (void *opaque, target_phys_addr_t addr,
2392 uint32_t value)
2393{
2394#if defined(DEBUG_SUBPAGE)
2395 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2396#endif
2397 subpage_writelen(opaque, addr, value, 1);
2398}
2399
2400static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2401{
2402#if defined(DEBUG_SUBPAGE)
2403 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2404#endif
2405
2406 return subpage_readlen(opaque, addr, 2);
2407}
2408
2409static void subpage_writel (void *opaque,
2410 target_phys_addr_t addr, uint32_t value)
2411{
2412#if defined(DEBUG_SUBPAGE)
2413 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2414#endif
2415 subpage_writelen(opaque, addr, value, 2);
2416}
2417
2418static CPUReadMemoryFunc *subpage_read[] = {
2419 &subpage_readb,
2420 &subpage_readw,
2421 &subpage_readl,
2422};
2423
2424static CPUWriteMemoryFunc *subpage_write[] = {
2425 &subpage_writeb,
2426 &subpage_writew,
2427 &subpage_writel,
2428};
2429
2430static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2431 ram_addr_t memory)
db7b5426
BS
2432{
2433 int idx, eidx;
4254fab8 2434 unsigned int i;
db7b5426
BS
2435
2436 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2437 return -1;
2438 idx = SUBPAGE_IDX(start);
2439 eidx = SUBPAGE_IDX(end);
2440#if defined(DEBUG_SUBPAGE)
2441 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2442 mmio, start, end, idx, eidx, memory);
2443#endif
2444 memory >>= IO_MEM_SHIFT;
2445 for (; idx <= eidx; idx++) {
4254fab8 2446 for (i = 0; i < 4; i++) {
3ee89922
BS
2447 if (io_mem_read[memory][i]) {
2448 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2449 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2450 }
2451 if (io_mem_write[memory][i]) {
2452 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2453 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2454 }
4254fab8 2455 }
db7b5426
BS
2456 }
2457
2458 return 0;
2459}
2460
00f82b8a
AJ
2461static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2462 ram_addr_t orig_memory)
db7b5426
BS
2463{
2464 subpage_t *mmio;
2465 int subpage_memory;
2466
2467 mmio = qemu_mallocz(sizeof(subpage_t));
2468 if (mmio != NULL) {
2469 mmio->base = base;
2470 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2471#if defined(DEBUG_SUBPAGE)
2472 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2473 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2474#endif
2475 *phys = subpage_memory | IO_MEM_SUBPAGE;
2476 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2477 }
2478
2479 return mmio;
2480}
2481
33417e70
FB
2482static void io_mem_init(void)
2483{
3a7d929e 2484 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2485 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2486 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2487 io_mem_nb = 5;
2488
6658ffb8
PB
2489#if defined(CONFIG_SOFTMMU)
2490 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2491 watch_mem_write, NULL);
2492#endif
1ccde1cb 2493 /* alloc dirty bits array */
0a962c02 2494 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2495 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2496}
2497
2498/* mem_read and mem_write are arrays of functions containing the
2499 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2500 2). Functions can be omitted with a NULL function pointer. The
2501 registered functions may be modified dynamically later.
2502 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2503 modified. If it is zero, a new io zone is allocated. The return
2504 value can be used with cpu_register_physical_memory(). (-1) is
2505 returned on error. */
33417e70
FB
2506int cpu_register_io_memory(int io_index,
2507 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2508 CPUWriteMemoryFunc **mem_write,
2509 void *opaque)
33417e70 2510{
4254fab8 2511 int i, subwidth = 0;
33417e70
FB
2512
2513 if (io_index <= 0) {
b5ff1b31 2514 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2515 return -1;
2516 io_index = io_mem_nb++;
2517 } else {
2518 if (io_index >= IO_MEM_NB_ENTRIES)
2519 return -1;
2520 }
b5ff1b31 2521
33417e70 2522 for(i = 0;i < 3; i++) {
4254fab8
BS
2523 if (!mem_read[i] || !mem_write[i])
2524 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2525 io_mem_read[io_index][i] = mem_read[i];
2526 io_mem_write[io_index][i] = mem_write[i];
2527 }
a4193c8a 2528 io_mem_opaque[io_index] = opaque;
4254fab8 2529 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2530}
61382a50 2531
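/* Usage sketch (a hypothetical device; every mydev_* name is invented
   for illustration): register byte/word/long callbacks and map one
   page of MMIO at 'base'.  Passing io_index == 0 allocates a new
   zone. */
static uint32_t mydev_readb_example(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device would decode 'addr' here */
}

static void mydev_writeb_example(void *opaque, target_phys_addr_t addr,
                                 uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *mydev_read_example[3] = {
    mydev_readb_example, mydev_readb_example, mydev_readb_example,
};

static CPUWriteMemoryFunc *mydev_write_example[3] = {
    mydev_writeb_example, mydev_writeb_example, mydev_writeb_example,
};

static void mydev_map_example(target_phys_addr_t base, void *opaque)
{
    int io;

    io = cpu_register_io_memory(0, mydev_read_example,
                                mydev_write_example, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}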
8926b517
FB
2532CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2533{
2534 return io_mem_write[io_index >> IO_MEM_SHIFT];
2535}
2536
2537CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2538{
2539 return io_mem_read[io_index >> IO_MEM_SHIFT];
2540}
2541
13eb76e0
FB
2542/* physical memory access (slow version, mainly for debug) */
2543#if defined(CONFIG_USER_ONLY)
5fafdf24 2544void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2545 int len, int is_write)
2546{
2547 int l, flags;
2548 target_ulong page;
53a5960a 2549 void * p;
13eb76e0
FB
2550
2551 while (len > 0) {
2552 page = addr & TARGET_PAGE_MASK;
2553 l = (page + TARGET_PAGE_SIZE) - addr;
2554 if (l > len)
2555 l = len;
2556 flags = page_get_flags(page);
2557 if (!(flags & PAGE_VALID))
2558 return;
2559 if (is_write) {
2560 if (!(flags & PAGE_WRITE))
2561 return;
579a97f7 2562 /* XXX: this code should not depend on lock_user */
72fb7daa 2563 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2564 /* FIXME - should this return an error rather than just fail? */
2565 return;
72fb7daa
AJ
2566 memcpy(p, buf, l);
2567 unlock_user(p, addr, l);
13eb76e0
FB
2568 } else {
2569 if (!(flags & PAGE_READ))
2570 return;
579a97f7 2571 /* XXX: this code should not depend on lock_user */
72fb7daa 2572 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2573 /* FIXME - should this return an error rather than just fail? */
2574 return;
72fb7daa 2575 memcpy(buf, p, l);
5b257578 2576 unlock_user(p, addr, 0);
13eb76e0
FB
2577 }
2578 len -= l;
2579 buf += l;
2580 addr += l;
2581 }
2582}
8df1cd07 2583
13eb76e0 2584#else
5fafdf24 2585void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2586 int len, int is_write)
2587{
2588 int l, io_index;
2589 uint8_t *ptr;
2590 uint32_t val;
2e12669a
FB
2591 target_phys_addr_t page;
2592 unsigned long pd;
92e873b9 2593 PhysPageDesc *p;
3b46e624 2594
13eb76e0
FB
2595 while (len > 0) {
2596 page = addr & TARGET_PAGE_MASK;
2597 l = (page + TARGET_PAGE_SIZE) - addr;
2598 if (l > len)
2599 l = len;
92e873b9 2600 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2601 if (!p) {
2602 pd = IO_MEM_UNASSIGNED;
2603 } else {
2604 pd = p->phys_offset;
2605 }
3b46e624 2606
13eb76e0 2607 if (is_write) {
3a7d929e 2608 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2609 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2610 /* XXX: could force cpu_single_env to NULL to avoid
2611 potential bugs */
13eb76e0 2612 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2613 /* 32 bit write access */
c27004ec 2614 val = ldl_p(buf);
a4193c8a 2615 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2616 l = 4;
2617 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2618 /* 16 bit write access */
c27004ec 2619 val = lduw_p(buf);
a4193c8a 2620 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2621 l = 2;
2622 } else {
1c213d19 2623 /* 8 bit write access */
c27004ec 2624 val = ldub_p(buf);
a4193c8a 2625 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2626 l = 1;
2627 }
2628 } else {
b448f2f3
FB
2629 unsigned long addr1;
2630 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2631 /* RAM case */
b448f2f3 2632 ptr = phys_ram_base + addr1;
13eb76e0 2633 memcpy(ptr, buf, l);
3a7d929e
FB
2634 if (!cpu_physical_memory_is_dirty(addr1)) {
2635 /* invalidate code */
2636 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2637 /* set dirty bit */
5fafdf24 2638 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2639 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2640 }
13eb76e0
FB
2641 }
2642 } else {
5fafdf24 2643 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2644 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2645 /* I/O case */
2646 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2647 if (l >= 4 && ((addr & 3) == 0)) {
2648 /* 32 bit read access */
a4193c8a 2649 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2650 stl_p(buf, val);
13eb76e0
FB
2651 l = 4;
2652 } else if (l >= 2 && ((addr & 1) == 0)) {
2653 /* 16 bit read access */
a4193c8a 2654 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2655 stw_p(buf, val);
13eb76e0
FB
2656 l = 2;
2657 } else {
1c213d19 2658 /* 8 bit read access */
a4193c8a 2659 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2660 stb_p(buf, val);
13eb76e0
FB
2661 l = 1;
2662 }
2663 } else {
2664 /* RAM case */
5fafdf24 2665 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2666 (addr & ~TARGET_PAGE_MASK);
2667 memcpy(buf, ptr, l);
2668 }
2669 }
2670 len -= l;
2671 buf += l;
2672 addr += l;
2673 }
2674}
8df1cd07 2675
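/* The cpu_physical_memory_read()/write() helpers used by the ld*_phys
   and st*_phys routines below are thin wrappers around
   cpu_physical_memory_rw() with is_write fixed to 0 or 1 (cpu-all.h). */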
d0ecd2aa 2676/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2677void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2678 const uint8_t *buf, int len)
2679{
2680 int l;
2681 uint8_t *ptr;
2682 target_phys_addr_t page;
2683 unsigned long pd;
2684 PhysPageDesc *p;
3b46e624 2685
d0ecd2aa
FB
2686 while (len > 0) {
2687 page = addr & TARGET_PAGE_MASK;
2688 l = (page + TARGET_PAGE_SIZE) - addr;
2689 if (l > len)
2690 l = len;
2691 p = phys_page_find(page >> TARGET_PAGE_BITS);
2692 if (!p) {
2693 pd = IO_MEM_UNASSIGNED;
2694 } else {
2695 pd = p->phys_offset;
2696 }
3b46e624 2697
d0ecd2aa 2698 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2699 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2700 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2701 /* do nothing */
2702 } else {
2703 unsigned long addr1;
2704 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2705 /* ROM/RAM case */
2706 ptr = phys_ram_base + addr1;
2707 memcpy(ptr, buf, l);
2708 }
2709 len -= l;
2710 buf += l;
2711 addr += l;
2712 }
2713}
2714
2715
8df1cd07
FB
2716/* warning: addr must be aligned */
2717uint32_t ldl_phys(target_phys_addr_t addr)
2718{
2719 int io_index;
2720 uint8_t *ptr;
2721 uint32_t val;
2722 unsigned long pd;
2723 PhysPageDesc *p;
2724
2725 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2726 if (!p) {
2727 pd = IO_MEM_UNASSIGNED;
2728 } else {
2729 pd = p->phys_offset;
2730 }
3b46e624 2731
5fafdf24 2732 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2733 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2734 /* I/O case */
2735 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2736 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2737 } else {
2738 /* RAM case */
5fafdf24 2739 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2740 (addr & ~TARGET_PAGE_MASK);
2741 val = ldl_p(ptr);
2742 }
2743 return val;
2744}
2745
84b7b8e7
FB
2746/* warning: addr must be aligned */
2747uint64_t ldq_phys(target_phys_addr_t addr)
2748{
2749 int io_index;
2750 uint8_t *ptr;
2751 uint64_t val;
2752 unsigned long pd;
2753 PhysPageDesc *p;
2754
2755 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2756 if (!p) {
2757 pd = IO_MEM_UNASSIGNED;
2758 } else {
2759 pd = p->phys_offset;
2760 }
3b46e624 2761
2a4188a3
FB
2762 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2763 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2764 /* I/O case */
2765 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2766#ifdef TARGET_WORDS_BIGENDIAN
2767 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2768 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2769#else
2770 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2771 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2772#endif
2773 } else {
2774 /* RAM case */
5fafdf24 2775 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2776 (addr & ~TARGET_PAGE_MASK);
2777 val = ldq_p(ptr);
2778 }
2779 return val;
2780}
2781
aab33094
FB
2782/* XXX: optimize */
2783uint32_t ldub_phys(target_phys_addr_t addr)
2784{
2785 uint8_t val;
2786 cpu_physical_memory_read(addr, &val, 1);
2787 return val;
2788}
2789
2790/* XXX: optimize */
2791uint32_t lduw_phys(target_phys_addr_t addr)
2792{
2793 uint16_t val;
2794 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2795 return tswap16(val);
2796}
2797
8df1cd07
FB
2798/* warning: addr must be aligned. The ram page is not marked as dirty
2799 and the code inside is not invalidated. It is useful if the dirty
2800 bits are used to track modified PTEs */
2801void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2802{
2803 int io_index;
2804 uint8_t *ptr;
2805 unsigned long pd;
2806 PhysPageDesc *p;
2807
2808 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2809 if (!p) {
2810 pd = IO_MEM_UNASSIGNED;
2811 } else {
2812 pd = p->phys_offset;
2813 }
3b46e624 2814
3a7d929e 2815 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2816 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2817 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2818 } else {
5fafdf24 2819 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2820 (addr & ~TARGET_PAGE_MASK);
2821 stl_p(ptr, val);
2822 }
2823}
2824
bc98a7ef
JM
2825void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2826{
2827 int io_index;
2828 uint8_t *ptr;
2829 unsigned long pd;
2830 PhysPageDesc *p;
2831
2832 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2833 if (!p) {
2834 pd = IO_MEM_UNASSIGNED;
2835 } else {
2836 pd = p->phys_offset;
2837 }
3b46e624 2838
bc98a7ef
JM
2839 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2840 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2841#ifdef TARGET_WORDS_BIGENDIAN
2842 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2843 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2844#else
2845 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2846 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2847#endif
2848 } else {
5fafdf24 2849 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2850 (addr & ~TARGET_PAGE_MASK);
2851 stq_p(ptr, val);
2852 }
2853}
2854
8df1cd07 2855/* warning: addr must be aligned */
8df1cd07
FB
2856void stl_phys(target_phys_addr_t addr, uint32_t val)
2857{
2858 int io_index;
2859 uint8_t *ptr;
2860 unsigned long pd;
2861 PhysPageDesc *p;
2862
2863 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2864 if (!p) {
2865 pd = IO_MEM_UNASSIGNED;
2866 } else {
2867 pd = p->phys_offset;
2868 }
3b46e624 2869
3a7d929e 2870 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2871 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2872 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2873 } else {
2874 unsigned long addr1;
2875 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2876 /* RAM case */
2877 ptr = phys_ram_base + addr1;
2878 stl_p(ptr, val);
3a7d929e
FB
2879 if (!cpu_physical_memory_is_dirty(addr1)) {
2880 /* invalidate code */
2881 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2882 /* set dirty bit */
f23db169
FB
2883 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2884 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2885 }
8df1cd07
FB
2886 }
2887}
2888
aab33094
FB
2889/* XXX: optimize */
2890void stb_phys(target_phys_addr_t addr, uint32_t val)
2891{
2892 uint8_t v = val;
2893 cpu_physical_memory_write(addr, &v, 1);
2894}
2895
2896/* XXX: optimize */
2897void stw_phys(target_phys_addr_t addr, uint32_t val)
2898{
2899 uint16_t v = tswap16(val);
2900 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2901}
2902
2903/* XXX: optimize */
2904void stq_phys(target_phys_addr_t addr, uint64_t val)
2905{
2906 val = tswap64(val);
2907 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2908}
2909
13eb76e0
FB
2910#endif
2911
2912/* virtual memory access for debug */
5fafdf24 2913int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2914 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2915{
2916 int l;
9b3c35e0
JM
2917 target_phys_addr_t phys_addr;
2918 target_ulong page;
13eb76e0
FB
2919
2920 while (len > 0) {
2921 page = addr & TARGET_PAGE_MASK;
2922 phys_addr = cpu_get_phys_page_debug(env, page);
2923 /* if no physical page mapped, return an error */
2924 if (phys_addr == -1)
2925 return -1;
2926 l = (page + TARGET_PAGE_SIZE) - addr;
2927 if (l > len)
2928 l = len;
5fafdf24 2929 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2930 buf, l, is_write);
13eb76e0
FB
2931 len -= l;
2932 buf += l;
2933 addr += l;
2934 }
2935 return 0;
2936}
2937
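/* Typical caller: the gdb stub, which services its memory access
   packets through cpu_memory_rw_debug() so the debugger can read and
   write guest virtual memory. */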
e3db7226
FB
2938void dump_exec_info(FILE *f,
2939 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2940{
2941 int i, target_code_size, max_target_code_size;
2942 int direct_jmp_count, direct_jmp2_count, cross_page;
2943 TranslationBlock *tb;
3b46e624 2944
e3db7226
FB
2945 target_code_size = 0;
2946 max_target_code_size = 0;
2947 cross_page = 0;
2948 direct_jmp_count = 0;
2949 direct_jmp2_count = 0;
2950 for(i = 0; i < nb_tbs; i++) {
2951 tb = &tbs[i];
2952 target_code_size += tb->size;
2953 if (tb->size > max_target_code_size)
2954 max_target_code_size = tb->size;
2955 if (tb->page_addr[1] != -1)
2956 cross_page++;
2957 if (tb->tb_next_offset[0] != 0xffff) {
2958 direct_jmp_count++;
2959 if (tb->tb_next_offset[1] != 0xffff) {
2960 direct_jmp2_count++;
2961 }
2962 }
2963 }
2964 /* XXX: avoid using doubles ? */
57fec1fe 2965 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2966 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2967 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2968 nb_tbs ? target_code_size / nb_tbs : 0,
2969 max_target_code_size);
5fafdf24 2970 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2971 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2972 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2973 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2974 cross_page,
e3db7226
FB
2975 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2976 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2977 direct_jmp_count,
e3db7226
FB
2978 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2979 direct_jmp2_count,
2980 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2981 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
2982 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2983 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2984 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
57fec1fe
FB
2985#ifdef CONFIG_PROFILER
2986 {
2987 int64_t tot;
2988 tot = dyngen_interm_time + dyngen_code_time;
2989 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2990 tot, tot / 2.4e9);
2991 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2992 dyngen_tb_count,
2993 dyngen_tb_count1 - dyngen_tb_count,
2994 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2995 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2996 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2997 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2998 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2999 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3000 dyngen_tb_count ?
3001 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3002 cpu_fprintf(f, "cycles/op %0.1f\n",
3003 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3004 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3005 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3006 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3007 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3008 if (tot == 0)
3009 tot = 1;
3010 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3011 (double)dyngen_interm_time / tot * 100.0);
3012 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3013 (double)dyngen_code_time / tot * 100.0);
3014 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3015 dyngen_restore_count);
3016 cpu_fprintf(f, " avg cycles %0.1f\n",
3017 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3018 {
3019 extern void dump_op_count(void);
3020 dump_op_count();
3021 }
3022 }
3023#endif
e3db7226
FB
3024}
3025
5fafdf24 3026#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3027
3028#define MMUSUFFIX _cmmu
3029#define GETPC() NULL
3030#define env cpu_single_env
b769d8fe 3031#define SOFTMMU_CODE_ACCESS
61382a50
FB
3032
3033#define SHIFT 0
3034#include "softmmu_template.h"
3035
3036#define SHIFT 1
3037#include "softmmu_template.h"
3038
3039#define SHIFT 2
3040#include "softmmu_template.h"
3041
3042#define SHIFT 3
3043#include "softmmu_template.h"
3044
3045#undef env
3046
3047#endif