[qemu.git] / exec.c
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

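/* Illustrative note: with the common 32-bit layout (TARGET_PAGE_BITS == 12,
   L2_BITS == 10, hence L1_BITS == 10), a target page index splits as
       index >> L2_BITS       -> slot in l1_map (top 10 bits)
       index & (L2_SIZE - 1)  -> slot in the level-2 PageDesc array
   so each level-1 entry covers L2_SIZE = 1024 pages, i.e. 4 MB of target
   address space, and level-2 arrays are only allocated on first use. */
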
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

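/* Illustrative note: map_exec() above rounds its range out to host page
   boundaries before changing protections; with a 4 KB host page,
   addr = 0x12345 and size = 0x100 yield the range [0x12000, 0x13000). */
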
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

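/* Illustrative sketch (hypothetical helper, compiled out): how a
   phys_offset value from phys_page_find() is decoded by the callers
   below. The low bits carry the I/O handler index or RAM/ROM type and
   the high bits give the page offset inside phys_ram_base. */
#if 0
static void phys_offset_decode_example(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    ram_addr_t io_index = pd & ~TARGET_PAGE_MASK; /* handler/type bits */
    ram_addr_t ram_off  = pd & TARGET_PAGE_MASK;  /* page base in RAM */
    uint8_t *host = phys_ram_base + ram_off;      /* valid for RAM pages only */
    (void)io_index;
    (void)host;
}
#endif
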
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

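/* Note on the pointer tagging used by the list walkers below: the TB
   lists store extra state in the low two bits of each pointer. For the
   per-page lists (first_tb/page_next) the tag is 0 or 1 and selects
   which of the TB's two possible physical pages the link belongs to.
   For the jump lists (jmp_first/jmp_next) the tag value 2 marks the
   list head, which is why the loops terminate on (n1 == 2). */
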
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

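/* Illustrative usage (compiled out): set_bits() copes with ranges that
   cross byte boundaries, e.g. setting 7 bits starting at bit 3 touches
   two bytes. */
#if 0
static void set_bits_example(void)
{
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 3, 7);    /* sets bits 3..9 */
    /* afterwards tab[0] == 0xf8 (bits 3-7) and tab[1] == 0x03 (bits 8-9) */
}
#endif
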
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

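/* Note: the code_gen_ptr update in tb_gen_code() above uses the usual
   power-of-two round-up (x + A - 1) & ~(A - 1); with CODE_GEN_ALIGN == 16,
   for example, 0x1003 rounds up to 0x1010. */
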
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

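/* Note: tb_next_offset[n] == 0xffff means jump slot 'n' was not emitted
   for this TB; otherwise tb_reset_jump() points the direct jump at the
   TB's own code (tc_ptr + tb_next_offset[n]), so an unchained TB simply
   falls through to its normal exit instead of jumping elsewhere. */
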
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

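/* Note: the binary search in tb_find_pc() is valid because tbs[] slots
   are handed out by tb_alloc() in the same order as the generated code
   (code_gen_ptr only grows between flushes), so tc_ptr increases
   monotonically across the array. */
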
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

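/* Illustrative usage (compiled out): parsing a comma separated mask
   list as passed on the command line. */
#if 0
static void log_mask_example(void)
{
    int mask;

    mask = cpu_str_to_log_mask("in_asm,cpu");
    /* mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU); any unknown name
       makes cpu_str_to_log_mask() return 0, and "all" selects every
       entry of cpu_log_items[]. */
    cpu_set_log(mask);
}
#endif
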
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

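/* Note: the software TLB is direct mapped; the entry for an address is
   simply (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), which is why
   tlb_flush_page() only needs to clear one index per MMU mode instead
   of scanning the whole table. */
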
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

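/* Note: dirty memory tracking keeps one flag byte per target page in
   phys_ram_dirty. cpu_physical_memory_reset_dirty() clears the given
   flag bits and then re-points matching write TLB entries at
   IO_MEM_NOTDIRTY, so the next store to such a page takes the slow
   path, where the dirty bits can be set again. */
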
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

59817ccb
FB
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (te->addr_code != -1) {
            tlb_flush_jmp_cache(env, te->addr_code);
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code were inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
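
/* Illustrative sketch (not part of the original file): a target MMU
   fault handler typically resolves vaddr -> paddr against the guest
   page tables and then installs the mapping with tlb_set_page_exec().
   The function and helper names below (cpu_mytarget_handle_mmu_fault,
   my_mmu_translate) are hypothetical; real targets follow this shape
   but differ in detail. */
#if 0
int cpu_mytarget_handle_mmu_fault(CPUState *env, target_ulong address,
                                  int is_write, int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* hypothetical helper: walk the guest page tables */
    if (my_mmu_translate(env, address, is_write, &paddr, &prot) < 0)
        return 1; /* caller raises a guest page fault */
    return tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, is_softmmu);
}
#endif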

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

9fa3e853
FB
1874/* dump memory mappings */
1875void page_dump(FILE *f)
33417e70 1876{
9fa3e853
FB
1877 unsigned long start, end;
1878 int i, j, prot, prot1;
1879 PageDesc *p;
33417e70 1880
9fa3e853
FB
1881 fprintf(f, "%-8s %-8s %-8s %s\n",
1882 "start", "end", "size", "prot");
1883 start = -1;
1884 end = -1;
1885 prot = 0;
1886 for(i = 0; i <= L1_SIZE; i++) {
1887 if (i < L1_SIZE)
1888 p = l1_map[i];
1889 else
1890 p = NULL;
1891 for(j = 0;j < L2_SIZE; j++) {
1892 if (!p)
1893 prot1 = 0;
1894 else
1895 prot1 = p[j].flags;
1896 if (prot1 != prot) {
1897 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1898 if (start != -1) {
1899 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1900 start, end, end - start,
9fa3e853
FB
1901 prot & PAGE_READ ? 'r' : '-',
1902 prot & PAGE_WRITE ? 'w' : '-',
1903 prot & PAGE_EXEC ? 'x' : '-');
1904 }
1905 if (prot1 != 0)
1906 start = end;
1907 else
1908 start = -1;
1909 prot = prot1;
1910 }
1911 if (!p)
1912 break;
1913 }
33417e70 1914 }
33417e70
FB
1915}
1916
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}

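/* Illustrative sketch (not part of the original file): user-mode
   emulation of a guest mmap() typically records the new pages with
   page_set_flags() and later validates guest buffers with
   page_check_range().  The function name and the guest range below are
   made up for the example. */
#if 0
int example_track_guest_mapping(target_ulong guest_addr, target_ulong size)
{
    /* record a fresh read/write mapping */
    page_set_flags(guest_addr, guest_addr + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* before copying data in, make sure the whole range is writable;
       this also unprotects pages made read-only because they contain
       translated code */
    if (page_check_range(guest_addr, size, PAGE_WRITE) < 0)
        return -1; /* a real syscall implementation would raise EFAULT */
    return 0;
}
#endif
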
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

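/* Worked example (illustrative, assuming 4 KiB target pages): for a
   device registered at start_addr = 0x10000200 with orig_size = 0x100,
   the first loop iteration below has addr == start_addr, so
       start_addr2 = 0x10000200 & ~TARGET_PAGE_MASK = 0x200  (> 0 -> subpage)
       end_addr2   = (0x10000200 + 0x100 - 1) & ~TARGET_PAGE_MASK = 0x2ff
   i.e. the device occupies bytes [0x200, 0x2ff] of its page and the
   rest of the page keeps its previous handlers via subpage_register(). */
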
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

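/* Illustrative sketch (not part of the original file): a board init
   function typically allocates backing RAM with qemu_ram_alloc() and
   maps it at a guest physical address with
   cpu_register_physical_memory().  The size and address are made up;
   IO_MEM_RAM marks the region as ordinary RAM. */
#if 0
void example_board_init(void)
{
    ram_addr_t ram_offset;

    /* 16 MB of RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif
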
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

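/* Illustrative note (not part of the original file): a debugger front
   end such as the gdb stub arms a watchpoint by recording its address
   in env->watchpoint[] and bumping env->nb_watchpoints;
   tlb_set_page_exec() above then routes accesses to that page through
   io_mem_watch, so every load/store on the page funnels into
   check_watchpoint(), which raises CPU_INTERRUPT_DEBUG only on an
   exact address match and otherwise forwards to the normal phys
   accessors. */
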
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

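/* Illustrative sketch (not part of the original file): a device
   registers its byte/word/long callbacks and maps the returned index
   at a guest physical address.  The my_dev_* callbacks and the mapping
   address are hypothetical. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

void example_map_device(void *dev_state)
{
    int iomemtype;

    /* io_index 0 asks for a fresh io zone */
    iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write,
                                       dev_state);
    cpu_register_physical_memory(0xfe000000, 0x1000, iomemtype);
}
#endif
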
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

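/* Illustrative sketch (not part of the original file): DMA-style
   device code moves data to and from guest memory with
   cpu_physical_memory_read()/write(), the thin wrappers around
   cpu_physical_memory_rw().  The 512-byte sector and the addresses are
   made up. */
#if 0
void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst)
{
    uint8_t buf[512];

    /* fetch a sector from guest RAM ... */
    cpu_physical_memory_read(src, buf, sizeof(buf));
    /* ... and store it elsewhere; the write path above also invalidates
       any translated code living in the destination page */
    cpu_physical_memory_write(dst, buf, sizeof(buf));
}
#endif
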
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

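/* Illustrative sketch (not part of the original file): a software
   page-table walker updates accessed/dirty bits in a guest PTE with
   stl_phys_notdirty() precisely so that this bookkeeping write neither
   marks the page dirty nor invalidates translated code; only real
   guest stores should do that.  PTE_DIRTY and the function name are
   hypothetical. */
#if 0
void example_update_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_DIRTY))
        stl_phys_notdirty(pte_addr, pte | PTE_DIRTY);
}
#endif
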
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

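/* Illustrative sketch (not part of the original file): the gdb stub
   reads guest virtual memory through cpu_memory_rw_debug(), which
   translates page by page with cpu_get_phys_page_debug() and therefore
   works even for addresses the TLB has never seen.  The wrapper name
   is hypothetical. */
#if 0
int example_read_guest_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1; /* unmapped in the guest */
    *out = ldl_p(buf);
    return 0;
}
#endif
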
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

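/* Note (added for clarity): each inclusion below instantiates the
   slow-path load/store helpers for one access size -- SHIFT n produces
   the (1 << n)-byte variants.  With SOFTMMU_CODE_ACCESS defined and
   MMUSUFFIX set to _cmmu, these become the code-fetch helpers used by
   the translator rather than the normal data-access ones. */
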
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif