/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

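/* Illustrative note (not in the original source): l1_map is a two-level
   lookup table keyed by the virtual page number.  With 4 KB target pages
   (TARGET_PAGE_BITS = 12) and L2_BITS = 10, a 32-bit address splits into
   10 bits of L1 index, 10 bits of L2 index and 12 bits of page offset,
   so a lookup for a hypothetical addr is (sketch):

       index = addr >> TARGET_PAGE_BITS;      // virtual page number
       l1    = index >> L2_BITS;              // index into l1_map[]
       l2    = index & (L2_SIZE - 1);         // index into the PageDesc array

   Second-level PageDesc arrays are allocated lazily by page_find_alloc()
   below. */
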
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

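/* Illustrative note (not in the original source): mprotect() operates on
   whole host pages, so map_exec() first rounds [addr, addr+size) outward
   to host-page boundaries before making the range executable; with 4 KB
   host pages, a hypothetical request of (0x1234, 0x100) is widened to
   [0x1000, 0x2000).  This is what makes the statically allocated
   code_gen_buffer usable as a target for generated code. */
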
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

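/* Illustrative note (not in the original source): the physical page table
   mirrors l1_map but is keyed by physical page number, and may grow an
   extra level when TARGET_PHYS_ADDR_SPACE_BITS > 32.  A typical caller
   fetches the descriptor for a physical address like this (sketch):

       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

   where the low bits of phys_offset encode the io_index and the rest the
   offset of the page within phys_ram_base. */
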
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        tbs = qemu_malloc(CODE_GEN_MAX_BLOCKS * sizeof(TranslationBlock));
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

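/* Illustrative note (not in the original source): tb_flush() is the
   "discard everything" path, taken when the static code_gen_buffer fills
   up (see tb_alloc() and CODE_GEN_BUFFER_MAX_SIZE above).  Every per-CPU
   tb_jmp_cache, the physical hash table and the per-page TB lists must
   all be cleared together, since each of them holds pointers into the
   buffer that is about to be reused. */
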
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

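/* Illustrative note (not in the original source): the `& 3` / `& ~3`
   manipulation above relies on TranslationBlock pointers being at least
   4-byte aligned, so the two low bits are free to carry a tag: in the
   per-page lists the tag records which of tb->page_addr[0..1] the link
   belongs to, and in the jump lists the value 2 marks the list head.
   Encoding and decoding look like (sketch):

       tagged = (TranslationBlock *)((long)tb | n);     // attach tag n
       n1  = (long)tagged & 3;                          // recover tag
       tb1 = (TranslationBlock *)((long)tagged & ~3);   // recover pointer
*/
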
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

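/* Illustrative note (not in the original source): the code bitmap holds
   one bit per byte of the page (TARGET_PAGE_SIZE / 8 bytes in total), set
   for every byte covered by some TB.  For example, set_bits(bitmap, 5, 2)
   ORs the mask 0x60 into bitmap[0], marking bytes 5 and 6.  The bitmap
   lets tb_invalidate_phys_page_fast() skip the expensive range
   invalidation for writes that do not overlap translated code. */
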
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
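
/* Illustrative note (not in the original source): self-modifying code is
   caught differently in the two build modes.  In user mode, the host page
   backing translated code is mprotect()ed read-only, so a guest write
   raises SIGSEGV and lands in page_unprotect(); in softmmu mode,
   tlb_protect_code() clears CODE_DIRTY_FLAG instead, which routes guest
   stores through the slow I/O path where the write can be checked against
   the page's TB list. */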

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

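/* Illustrative note (not in the original source): the binary search works
   because TBs are handed out sequentially from tbs[] and their tc_ptr
   values grow monotonically with allocation order, so the array is
   already sorted by tc_ptr.  When the loop falls through, tbs[m_max] is
   the last TB starting at or before tc_ptr, i.e. the block containing the
   host PC of a fault inside generated code. */
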
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

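/* Illustrative note (not in the original source): inserting or removing a
   breakpoint must discard any TB covering the target pc, because the
   check that raises EXCP_DEBUG is compiled into the translated code
   itself; breakpoint_invalidate() does this by invalidating the one-byte
   physical range at that pc, forcing retranslation on the next run. */
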
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

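/* Illustrative note (not in the original source): cpu_interrupt() cannot
   stop a TB that is already running, so it breaks the chain instead:
   tb_reset_jump_recursive() unlinks every direct jump that could keep
   execution inside generated code, guaranteeing the CPU loop regains
   control at the next block boundary and can service interrupt_request. */
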
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

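/* Illustrative usage note (not in the original source): this is the
   parser behind the -d command line option, e.g.

       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (!mask)
           // unknown log item name
       cpu_set_log(mask);

   "all" selects every entry of cpu_log_items at once. */
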
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

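/* Illustrative note (not in the original source): the software TLB is
   direct-mapped per MMU mode, so a virtual address selects exactly one
   slot in each mode's table:

       i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

   which is why tlb_flush_page() only has to probe index i in each
   tlb_table[mmu_idx] rather than scan the whole table. */
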
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

1536
3a7d929e 1537void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1538 int dirty_flags)
1ccde1cb
FB
1539{
1540 CPUState *env;
4f2ac237 1541 unsigned long length, start1;
0a962c02
FB
1542 int i, mask, len;
1543 uint8_t *p;
1ccde1cb
FB
1544
1545 start &= TARGET_PAGE_MASK;
1546 end = TARGET_PAGE_ALIGN(end);
1547
1548 length = end - start;
1549 if (length == 0)
1550 return;
0a962c02 1551 len = length >> TARGET_PAGE_BITS;
3a7d929e 1552#ifdef USE_KQEMU
6a00d601
FB
1553 /* XXX: should not depend on cpu context */
1554 env = first_cpu;
3a7d929e 1555 if (env->kqemu_enabled) {
f23db169
FB
1556 ram_addr_t addr;
1557 addr = start;
1558 for(i = 0; i < len; i++) {
1559 kqemu_set_notdirty(env, addr);
1560 addr += TARGET_PAGE_SIZE;
1561 }
3a7d929e
FB
1562 }
1563#endif
f23db169
FB
1564 mask = ~dirty_flags;
1565 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1566 for(i = 0; i < len; i++)
1567 p[i] &= mask;
1568
1ccde1cb
FB
1569 /* we modify the TLB cache so that the dirty bit will be set again
1570 when accessing the range */
59817ccb 1571 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1572 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1573 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1574 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1575 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1576 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1577#if (NB_MMU_MODES >= 3)
1578 for(i = 0; i < CPU_TLB_SIZE; i++)
1579 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1580#if (NB_MMU_MODES == 4)
1581 for(i = 0; i < CPU_TLB_SIZE; i++)
1582 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1583#endif
1584#endif
6a00d601 1585 }
59817ccb
FB
1586
1587#if !defined(CONFIG_SOFTMMU)
1588 /* XXX: this is expensive */
1589 {
1590 VirtPageDesc *p;
1591 int j;
1592 target_ulong addr;
1593
1594 for(i = 0; i < L1_SIZE; i++) {
1595 p = l1_virt_map[i];
1596 if (p) {
1597 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1598 for(j = 0; j < L2_SIZE; j++) {
1599 if (p->valid_tag == virt_valid_tag &&
1600 p->phys_addr >= start && p->phys_addr < end &&
1601 (p->prot & PROT_WRITE)) {
1602 if (addr < MMAP_AREA_END) {
5fafdf24 1603 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1604 p->prot & ~PROT_WRITE);
1605 }
1606 }
1607 addr += TARGET_PAGE_SIZE;
1608 p++;
1609 }
1610 }
1611 }
1612 }
1613#endif
1ccde1cb
FB
1614}
1615
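/* Illustrative note (not in the original source): phys_ram_dirty keeps one
   byte of dirty flags per RAM page.  Resetting CODE_DIRTY_FLAG (or the
   VGA/migration flags) here also retags matching write TLB entries as
   IO_MEM_NOTDIRTY, so the next guest store to such a page takes the slow
   path, where the flag can be set again and, for code pages, the page's
   TB list checked for invalidation. */
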
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

59817ccb
FB
1678/* add a new TLB entry. At most one entry for a given virtual address
1679 is permitted. Return 0 if OK or 2 if the page could not be mapped
1680 (can only happen in non SOFTMMU mode for I/O pages or pages
1681 conflicting with the host address space). */
5fafdf24
TS
1682int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1683 target_phys_addr_t paddr, int prot,
6ebbf390 1684 int mmu_idx, int is_softmmu)
9fa3e853 1685{
92e873b9 1686 PhysPageDesc *p;
4f2ac237 1687 unsigned long pd;
9fa3e853 1688 unsigned int index;
4f2ac237 1689 target_ulong address;
108c49b8 1690 target_phys_addr_t addend;
9fa3e853 1691 int ret;
84b7b8e7 1692 CPUTLBEntry *te;
6658ffb8 1693 int i;
9fa3e853 1694
92e873b9 1695 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1696 if (!p) {
1697 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1698 } else {
1699 pd = p->phys_offset;
9fa3e853
FB
1700 }
1701#if defined(DEBUG_TLB)
6ebbf390
JM
1702 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1703 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1704#endif
1705
1706 ret = 0;
1707#if !defined(CONFIG_SOFTMMU)
5fafdf24 1708 if (is_softmmu)
9fa3e853
FB
1709#endif
1710 {
2a4188a3 1711 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1712 /* IO memory case */
1713 address = vaddr | pd;
1714 addend = paddr;
1715 } else {
1716 /* standard memory */
1717 address = vaddr;
1718 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1719 }
6658ffb8
PB
1720
1721 /* Make accesses to pages with watchpoints go via the
1722 watchpoint trap routines. */
1723 for (i = 0; i < env->nb_watchpoints; i++) {
1724 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1725 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1726 env->watchpoint[i].addend = 0;
6658ffb8
PB
1727 address = vaddr | io_mem_watch;
1728 } else {
d79acba4
AZ
1729 env->watchpoint[i].addend = pd - paddr +
1730 (unsigned long) phys_ram_base;
6658ffb8
PB
1731 /* TODO: Figure out how to make read watchpoints coexist
1732 with code. */
1733 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1734 }
1735 }
1736 }
d79acba4 1737
90f18422 1738 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1739 addend -= vaddr;
6ebbf390 1740 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1741 te->addend = addend;
67b915a5 1742 if (prot & PAGE_READ) {
84b7b8e7
FB
1743 te->addr_read = address;
1744 } else {
1745 te->addr_read = -1;
1746 }
5c751e99 1747
84b7b8e7
FB
1748 if (prot & PAGE_EXEC) {
1749 te->addr_code = address;
9fa3e853 1750 } else {
84b7b8e7 1751 te->addr_code = -1;
9fa3e853 1752 }
67b915a5 1753 if (prot & PAGE_WRITE) {
5fafdf24 1754 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1755 (pd & IO_MEM_ROMD)) {
1756 /* write access calls the I/O callback */
5fafdf24 1757 te->addr_write = vaddr |
856074ec 1758 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1759 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1760 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1761 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1762 } else {
84b7b8e7 1763 te->addr_write = address;
9fa3e853
FB
1764 }
1765 } else {
84b7b8e7 1766 te->addr_write = -1;
9fa3e853
FB
1767 }
1768 }
1769#if !defined(CONFIG_SOFTMMU)
1770 else {
1771 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1772 /* IO access: no mapping is done as it will be handled by the
1773 soft MMU */
1774 if (!(env->hflags & HF_SOFTMMU_MASK))
1775 ret = 2;
1776 } else {
1777 void *map_addr;
59817ccb
FB
1778
1779 if (vaddr >= MMAP_AREA_END) {
1780 ret = 2;
1781 } else {
1782 if (prot & PROT_WRITE) {
5fafdf24 1783 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1784#if defined(TARGET_HAS_SMC) || 1
59817ccb 1785 first_tb ||
d720b93d 1786#endif
5fafdf24 1787 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1788 !cpu_physical_memory_is_dirty(pd))) {
1789 /* ROM: we act as if code were inside */
1790 /* if code is present, we only map read-only and save the
1791 original mapping */
1792 VirtPageDesc *vp;
3b46e624 1793
90f18422 1794 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1795 vp->phys_addr = pd;
1796 vp->prot = prot;
1797 vp->valid_tag = virt_valid_tag;
1798 prot &= ~PAGE_WRITE;
1799 }
1800 }
5fafdf24 1801 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1802 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1803 if (map_addr == MAP_FAILED) {
1804 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1805 paddr, vaddr);
9fa3e853 1806 }
9fa3e853
FB
1807 }
1808 }
1809 }
1810#endif
1811 return ret;
1812}
1813
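/* Illustrative sketch (not part of exec.c): how a target MMU fault
   handler typically installs a mapping once it has translated a guest
   virtual address. The physical address and protection bits below are
   assumptions standing in for a real page-table walk. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr = 0x00100000;  /* from the page-table walk */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* at most one TLB entry per virtual address; returns 0 on success,
       2 if the page could not be mapped */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr, prot, mmu_idx, is_softmmu);
}
#endif
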
1814/* called from signal handler: invalidate the code and unprotect the
1815 page. Return TRUE if the fault was successfully handled. */
53a5960a 1816int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1817{
1818#if !defined(CONFIG_SOFTMMU)
1819 VirtPageDesc *vp;
1820
1821#if defined(DEBUG_TLB)
1822 printf("page_unprotect: addr=0x%08x\n", addr);
1823#endif
1824 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1825
1826 /* if it is not mapped, no need to worry here */
1827 if (addr >= MMAP_AREA_END)
1828 return 0;
9fa3e853
FB
1829 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1830 if (!vp)
1831 return 0;
1832 /* NOTE: in this case, valid_tag is _not_ tested as it
1833 validates only the code TLB */
1834 if (vp->valid_tag != virt_valid_tag)
1835 return 0;
1836 if (!(vp->prot & PAGE_WRITE))
1837 return 0;
1838#if defined(DEBUG_TLB)
5fafdf24 1839 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1840 addr, vp->phys_addr, vp->prot);
1841#endif
59817ccb
FB
1842 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1843 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1844 (unsigned long)addr, vp->prot);
d720b93d 1845 /* set the dirty bit */
0a962c02 1846 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1847 /* flush the code inside */
1848 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1849 return 1;
1850#else
1851 return 0;
1852#endif
33417e70
FB
1853}
1854
0124311e
FB
1855#else
1856
ee8b7021 1857void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1858{
1859}
1860
2e12669a 1861void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1862{
1863}
1864
5fafdf24
TS
1865int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1866 target_phys_addr_t paddr, int prot,
6ebbf390 1867 int mmu_idx, int is_softmmu)
9fa3e853
FB
1868{
1869 return 0;
1870}
0124311e 1871
9fa3e853
FB
1872/* dump memory mappings */
1873void page_dump(FILE *f)
33417e70 1874{
9fa3e853
FB
1875 unsigned long start, end;
1876 int i, j, prot, prot1;
1877 PageDesc *p;
33417e70 1878
9fa3e853
FB
1879 fprintf(f, "%-8s %-8s %-8s %s\n",
1880 "start", "end", "size", "prot");
1881 start = -1;
1882 end = -1;
1883 prot = 0;
1884 for(i = 0; i <= L1_SIZE; i++) {
1885 if (i < L1_SIZE)
1886 p = l1_map[i];
1887 else
1888 p = NULL;
1889 for(j = 0;j < L2_SIZE; j++) {
1890 if (!p)
1891 prot1 = 0;
1892 else
1893 prot1 = p[j].flags;
1894 if (prot1 != prot) {
1895 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1896 if (start != -1) {
1897 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1898 start, end, end - start,
9fa3e853
FB
1899 prot & PAGE_READ ? 'r' : '-',
1900 prot & PAGE_WRITE ? 'w' : '-',
1901 prot & PAGE_EXEC ? 'x' : '-');
1902 }
1903 if (prot1 != 0)
1904 start = end;
1905 else
1906 start = -1;
1907 prot = prot1;
1908 }
1909 if (!p)
1910 break;
1911 }
33417e70 1912 }
33417e70
FB
1913}
1914
53a5960a 1915int page_get_flags(target_ulong address)
33417e70 1916{
9fa3e853
FB
1917 PageDesc *p;
1918
1919 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1920 if (!p)
9fa3e853
FB
1921 return 0;
1922 return p->flags;
1923}
1924
1925/* modify the flags of a page and invalidate the code if
1926 necessary. The flag PAGE_WRITE_ORG is set automatically
1927 depending on PAGE_WRITE */
53a5960a 1928void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1929{
1930 PageDesc *p;
53a5960a 1931 target_ulong addr;
9fa3e853
FB
1932
1933 start = start & TARGET_PAGE_MASK;
1934 end = TARGET_PAGE_ALIGN(end);
1935 if (flags & PAGE_WRITE)
1936 flags |= PAGE_WRITE_ORG;
1937 spin_lock(&tb_lock);
1938 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1939 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1940 /* if the write protection is set, then we invalidate the code
1941 inside */
5fafdf24 1942 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1943 (flags & PAGE_WRITE) &&
1944 p->first_tb) {
d720b93d 1945 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1946 }
1947 p->flags = flags;
1948 }
1949 spin_unlock(&tb_lock);
33417e70
FB
1950}
1951
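/* Illustrative sketch (not part of exec.c): user-mode emulation code
   recording a fresh guest mapping after a successful target mmap().
   'start' and 'len' are assumed to be page-aligned guest addresses. */
#if 0
static void example_record_mapping(target_ulong start, target_ulong len,
                                   int prot)
{
    /* PAGE_VALID marks the range as mapped; page_set_flags() adds
       PAGE_WRITE_ORG itself whenever PAGE_WRITE is requested */
    page_set_flags(start, start + len, prot | PAGE_VALID);
}
#endif
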
3d97b40b
TS
1952int page_check_range(target_ulong start, target_ulong len, int flags)
1953{
1954 PageDesc *p;
1955 target_ulong end;
1956 target_ulong addr;
1957
1958 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1959 start = start & TARGET_PAGE_MASK;
1960
1961 if( end < start )
1962 /* we've wrapped around */
1963 return -1;
1964 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1965 p = page_find(addr >> TARGET_PAGE_BITS);
1966 if( !p )
1967 return -1;
1968 if( !(p->flags & PAGE_VALID) )
1969 return -1;
1970
dae3270c 1971 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1972 return -1;
dae3270c
FB
1973 if (flags & PAGE_WRITE) {
1974 if (!(p->flags & PAGE_WRITE_ORG))
1975 return -1;
1976 /* unprotect the page if it was put read-only because it
1977 contains translated code */
1978 if (!(p->flags & PAGE_WRITE)) {
1979 if (!page_unprotect(addr, 0, NULL))
1980 return -1;
1981 }
1982 return 0;
1983 }
3d97b40b
TS
1984 }
1985 return 0;
1986}
1987
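/* Illustrative sketch (not part of exec.c): validating a guest buffer
   before emulated syscall code touches it. page_check_range() returns
   0 when every page in the range is valid with the requested
   protection and -1 otherwise, unprotecting along the way any page
   made read-only because it contains translated code. */
#if 0
static int example_buffer_is_readable(target_ulong guest_addr,
                                      target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ) == 0;
}
#endif
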
9fa3e853
FB
1988/* called from signal handler: invalidate the code and unprotect the
1989 page. Return TRUE if the fault was successfully handled. */
53a5960a 1990int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1991{
1992 unsigned int page_index, prot, pindex;
1993 PageDesc *p, *p1;
53a5960a 1994 target_ulong host_start, host_end, addr;
9fa3e853 1995
83fb7adf 1996 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1997 page_index = host_start >> TARGET_PAGE_BITS;
1998 p1 = page_find(page_index);
1999 if (!p1)
2000 return 0;
83fb7adf 2001 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2002 p = p1;
2003 prot = 0;
2004 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2005 prot |= p->flags;
2006 p++;
2007 }
2008 /* if the page was really writable, then we change its
2009 protection back to writable */
2010 if (prot & PAGE_WRITE_ORG) {
2011 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2012 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2013 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2014 (prot & PAGE_BITS) | PAGE_WRITE);
2015 p1[pindex].flags |= PAGE_WRITE;
2016 /* and since the content will be modified, we must invalidate
2017 the corresponding translated code. */
d720b93d 2018 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2019#ifdef DEBUG_TB_CHECK
2020 tb_invalidate_check(address);
2021#endif
2022 return 1;
2023 }
2024 }
2025 return 0;
2026}
2027
6a00d601
FB
2028static inline void tlb_set_dirty(CPUState *env,
2029 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2030{
2031}
9fa3e853
FB
2032#endif /* defined(CONFIG_USER_ONLY) */
2033
db7b5426 2034static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2035 ram_addr_t memory);
2036static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2037 ram_addr_t orig_memory);
db7b5426
BS
2038#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2039 need_subpage) \
2040 do { \
2041 if (addr > start_addr) \
2042 start_addr2 = 0; \
2043 else { \
2044 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2045 if (start_addr2 > 0) \
2046 need_subpage = 1; \
2047 } \
2048 \
49e9fba2 2049 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2050 end_addr2 = TARGET_PAGE_SIZE - 1; \
2051 else { \
2052 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2053 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2054 need_subpage = 1; \
2055 } \
2056 } while (0)
2057
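/* Worked example for CHECK_SUBPAGE (illustrative, assuming 4 KB target
   pages): registering start_addr = 0x1000200 with orig_size = 0x500,
   on the first loop iteration addr == start_addr, so:
     start_addr2 = start_addr & ~TARGET_PAGE_MASK = 0x200 > 0
       -> need_subpage = 1
     (start_addr + orig_size) - addr = 0x500 < TARGET_PAGE_SIZE, so
     end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK = 0x6ff
       -> a partial page, confirming need_subpage = 1
   Only bytes 0x200..0x6ff of the page are covered, so the registration
   below is routed through the subpage machinery. */
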
33417e70
FB
2058/* register physical memory. 'size' must be a multiple of the target
2059 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2060 io memory page */
5fafdf24 2061void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2062 ram_addr_t size,
2063 ram_addr_t phys_offset)
33417e70 2064{
108c49b8 2065 target_phys_addr_t addr, end_addr;
92e873b9 2066 PhysPageDesc *p;
9d42037b 2067 CPUState *env;
00f82b8a 2068 ram_addr_t orig_size = size;
db7b5426 2069 void *subpage;
33417e70 2070
5fd386f6 2071 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2072 end_addr = start_addr + (target_phys_addr_t)size;
2073 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2074 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2075 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2076 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2077 target_phys_addr_t start_addr2, end_addr2;
2078 int need_subpage = 0;
2079
2080 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2081 need_subpage);
4254fab8 2082 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2083 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2084 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2085 &p->phys_offset, orig_memory);
2086 } else {
2087 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2088 >> IO_MEM_SHIFT];
2089 }
2090 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2091 } else {
2092 p->phys_offset = phys_offset;
2093 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2094 (phys_offset & IO_MEM_ROMD))
2095 phys_offset += TARGET_PAGE_SIZE;
2096 }
2097 } else {
2098 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2099 p->phys_offset = phys_offset;
2100 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2101 (phys_offset & IO_MEM_ROMD))
2102 phys_offset += TARGET_PAGE_SIZE;
2103 else {
2104 target_phys_addr_t start_addr2, end_addr2;
2105 int need_subpage = 0;
2106
2107 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2108 end_addr2, need_subpage);
2109
4254fab8 2110 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2111 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2112 &p->phys_offset, IO_MEM_UNASSIGNED);
2113 subpage_register(subpage, start_addr2, end_addr2,
2114 phys_offset);
2115 }
2116 }
2117 }
33417e70 2118 }
3b46e624 2119
9d42037b
FB
2120 /* since each CPU stores ram addresses in its TLB cache, we must
2121 reset the modified entries */
2122 /* XXX: slow ! */
2123 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2124 tlb_flush(env, 1);
2125 }
33417e70
FB
2126}
2127
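/* Illustrative sketch (not part of exec.c): typical board setup code
   mapping guest RAM. The base address and size are assumptions. */
#if 0
static void example_map_ram(void)
{
    ram_addr_t ram_size = 128 * 1024 * 1024;
    /* the low bits of a RAM phys_offset are zero, so accesses go
       straight to phys_ram_base + offset with no I/O callback */
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset);
}
#endif
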
ba863458 2128/* XXX: temporary until new memory mapping API */
00f82b8a 2129ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2130{
2131 PhysPageDesc *p;
2132
2133 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2134 if (!p)
2135 return IO_MEM_UNASSIGNED;
2136 return p->phys_offset;
2137}
2138
e9a1ab19 2139/* XXX: better than nothing */
00f82b8a 2140ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2141{
2142 ram_addr_t addr;
7fb4fdcf 2143 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2144 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2145 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2146 abort();
2147 }
2148 addr = phys_ram_alloc_offset;
2149 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2150 return addr;
2151}
2152
2153void qemu_ram_free(ram_addr_t addr)
2154{
2155}
2156
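/* Illustrative note: qemu_ram_alloc() is a simple bump allocator over
   the preallocated phys_ram_base area, and qemu_ram_free() is a stub,
   so offsets are never recycled. Two successive allocations return
   increasing, page-aligned offsets: */
#if 0
ram_addr_t a = qemu_ram_alloc(0x2000);
ram_addr_t b = qemu_ram_alloc(0x0800);
/* here b == TARGET_PAGE_ALIGN(a + 0x2000) */
#endif
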
a4193c8a 2157static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2158{
67d3b957 2159#ifdef DEBUG_UNASSIGNED
ab3d1727 2160 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2161#endif
2162#ifdef TARGET_SPARC
6c36d3fa 2163 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2164#elif TARGET_CRIS
2165 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2166#endif
33417e70
FB
2167 return 0;
2168}
2169
a4193c8a 2170static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2171{
67d3b957 2172#ifdef DEBUG_UNASSIGNED
ab3d1727 2173 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2174#endif
b4f0a316 2175#ifdef TARGET_SPARC
6c36d3fa 2176 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2177#elif TARGET_CRIS
2178 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2179#endif
33417e70
FB
2180}
2181
2182static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2183 unassigned_mem_readb,
2184 unassigned_mem_readb,
2185 unassigned_mem_readb,
2186};
2187
2188static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2189 unassigned_mem_writeb,
2190 unassigned_mem_writeb,
2191 unassigned_mem_writeb,
2192};
2193
3a7d929e 2194static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2195{
3a7d929e
FB
2196 unsigned long ram_addr;
2197 int dirty_flags;
2198 ram_addr = addr - (unsigned long)phys_ram_base;
2199 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2200 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2201#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2202 tb_invalidate_phys_page_fast(ram_addr, 1);
2203 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2204#endif
3a7d929e 2205 }
c27004ec 2206 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2207#ifdef USE_KQEMU
2208 if (cpu_single_env->kqemu_enabled &&
2209 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2210 kqemu_modify_page(cpu_single_env, ram_addr);
2211#endif
f23db169
FB
2212 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2213 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2214 /* we remove the notdirty callback only if the code has been
2215 flushed */
2216 if (dirty_flags == 0xff)
6a00d601 2217 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2218}
2219
3a7d929e 2220static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2221{
3a7d929e
FB
2222 unsigned long ram_addr;
2223 int dirty_flags;
2224 ram_addr = addr - (unsigned long)phys_ram_base;
2225 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2226 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2227#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2228 tb_invalidate_phys_page_fast(ram_addr, 2);
2229 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2230#endif
3a7d929e 2231 }
c27004ec 2232 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2233#ifdef USE_KQEMU
2234 if (cpu_single_env->kqemu_enabled &&
2235 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2236 kqemu_modify_page(cpu_single_env, ram_addr);
2237#endif
f23db169
FB
2238 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2239 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2240 /* we remove the notdirty callback only if the code has been
2241 flushed */
2242 if (dirty_flags == 0xff)
6a00d601 2243 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2244}
2245
3a7d929e 2246static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2247{
3a7d929e
FB
2248 unsigned long ram_addr;
2249 int dirty_flags;
2250 ram_addr = addr - (unsigned long)phys_ram_base;
2251 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2252 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2253#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2254 tb_invalidate_phys_page_fast(ram_addr, 4);
2255 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2256#endif
3a7d929e 2257 }
c27004ec 2258 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2259#ifdef USE_KQEMU
2260 if (cpu_single_env->kqemu_enabled &&
2261 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2262 kqemu_modify_page(cpu_single_env, ram_addr);
2263#endif
f23db169
FB
2264 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2265 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2266 /* we remove the notdirty callback only if the code has been
2267 flushed */
2268 if (dirty_flags == 0xff)
6a00d601 2269 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2270}
2271
3a7d929e 2272static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2273 NULL, /* never used */
2274 NULL, /* never used */
2275 NULL, /* never used */
2276};
2277
1ccde1cb
FB
2278static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2279 notdirty_mem_writeb,
2280 notdirty_mem_writew,
2281 notdirty_mem_writel,
2282};
2283
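/* Note on the flow above: the first write to a clean RAM page is
   routed through these notdirty handlers, which invalidate any TBs on
   the page, perform the store, and set the dirty flags; once
   dirty_flags reaches 0xff the TLB entry is switched back to a direct
   RAM mapping, so later writes bypass the callbacks entirely. */
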
6658ffb8
PB
2284#if defined(CONFIG_SOFTMMU)
2285/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2286 so these check for a hit then pass through to the normal out-of-line
2287 phys routines. */
2288static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2289{
2290 return ldub_phys(addr);
2291}
2292
2293static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2294{
2295 return lduw_phys(addr);
2296}
2297
2298static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2299{
2300 return ldl_phys(addr);
2301}
2302
2303/* Generate a debug exception if a watchpoint has been hit.
2304 Returns the real physical address of the access. addr will be a host
d79acba4 2305 address in case of a RAM location. */
6658ffb8
PB
2306static target_ulong check_watchpoint(target_phys_addr_t addr)
2307{
2308 CPUState *env = cpu_single_env;
2309 target_ulong watch;
2310 target_ulong retaddr;
2311 int i;
2312
2313 retaddr = addr;
2314 for (i = 0; i < env->nb_watchpoints; i++) {
2315 watch = env->watchpoint[i].vaddr;
2316 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2317 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2318 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2319 cpu_single_env->watchpoint_hit = i + 1;
2320 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2321 break;
2322 }
2323 }
2324 }
2325 return retaddr;
2326}
2327
2328static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2329 uint32_t val)
2330{
2331 addr = check_watchpoint(addr);
2332 stb_phys(addr, val);
2333}
2334
2335static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2336 uint32_t val)
2337{
2338 addr = check_watchpoint(addr);
2339 stw_phys(addr, val);
2340}
2341
2342static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2343 uint32_t val)
2344{
2345 addr = check_watchpoint(addr);
2346 stl_phys(addr, val);
2347}
2348
2349static CPUReadMemoryFunc *watch_mem_read[3] = {
2350 watch_mem_readb,
2351 watch_mem_readw,
2352 watch_mem_readl,
2353};
2354
2355static CPUWriteMemoryFunc *watch_mem_write[3] = {
2356 watch_mem_writeb,
2357 watch_mem_writew,
2358 watch_mem_writel,
2359};
2360#endif
2361
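/* Illustrative sketch (assumes the cpu_watchpoint_insert() helper
   defined elsewhere in this file): once a watchpoint is installed,
   tlb_set_page_exec() redirects the page through io_mem_watch so that
   every access funnels into check_watchpoint() above. */
#if 0
static void example_watch_guest_address(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr);
    /* a guest access to vaddr now sets env->watchpoint_hit and raises
       CPU_INTERRUPT_DEBUG via check_watchpoint() */
}
#endif
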
db7b5426
BS
2362static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2363 unsigned int len)
2364{
db7b5426
BS
2365 uint32_t ret;
2366 unsigned int idx;
2367
2368 idx = SUBPAGE_IDX(addr - mmio->base);
2369#if defined(DEBUG_SUBPAGE)
2370 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2371 mmio, len, addr, idx);
2372#endif
3ee89922 2373 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2374
2375 return ret;
2376}
2377
2378static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2379 uint32_t value, unsigned int len)
2380{
db7b5426
BS
2381 unsigned int idx;
2382
2383 idx = SUBPAGE_IDX(addr - mmio->base);
2384#if defined(DEBUG_SUBPAGE)
2385 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2386 mmio, len, addr, idx, value);
2387#endif
3ee89922 2388 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2389}
2390
2391static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2392{
2393#if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395#endif
2396
2397 return subpage_readlen(opaque, addr, 0);
2398}
2399
2400static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2401 uint32_t value)
2402{
2403#if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405#endif
2406 subpage_writelen(opaque, addr, value, 0);
2407}
2408
2409static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2410{
2411#if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2413#endif
2414
2415 return subpage_readlen(opaque, addr, 1);
2416}
2417
2418static void subpage_writew (void *opaque, target_phys_addr_t addr,
2419 uint32_t value)
2420{
2421#if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2423#endif
2424 subpage_writelen(opaque, addr, value, 1);
2425}
2426
2427static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2428{
2429#if defined(DEBUG_SUBPAGE)
2430 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2431#endif
2432
2433 return subpage_readlen(opaque, addr, 2);
2434}
2435
2436static void subpage_writel (void *opaque,
2437 target_phys_addr_t addr, uint32_t value)
2438{
2439#if defined(DEBUG_SUBPAGE)
2440 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2441#endif
2442 subpage_writelen(opaque, addr, value, 2);
2443}
2444
2445static CPUReadMemoryFunc *subpage_read[] = {
2446 &subpage_readb,
2447 &subpage_readw,
2448 &subpage_readl,
2449};
2450
2451static CPUWriteMemoryFunc *subpage_write[] = {
2452 &subpage_writeb,
2453 &subpage_writew,
2454 &subpage_writel,
2455};
2456
2457static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2458 ram_addr_t memory)
db7b5426
BS
2459{
2460 int idx, eidx;
4254fab8 2461 unsigned int i;
db7b5426
BS
2462
2463 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2464 return -1;
2465 idx = SUBPAGE_IDX(start);
2466 eidx = SUBPAGE_IDX(end);
2467#if defined(DEBUG_SUBPAGE)
2468 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2469 mmio, start, end, idx, eidx, memory);
2470#endif
2471 memory >>= IO_MEM_SHIFT;
2472 for (; idx <= eidx; idx++) {
4254fab8 2473 for (i = 0; i < 4; i++) {
3ee89922
BS
2474 if (io_mem_read[memory][i]) {
2475 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2476 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2477 }
2478 if (io_mem_write[memory][i]) {
2479 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2480 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2481 }
4254fab8 2482 }
db7b5426
BS
2483 }
2484
2485 return 0;
2486}
2487
00f82b8a
AJ
2488static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2489 ram_addr_t orig_memory)
db7b5426
BS
2490{
2491 subpage_t *mmio;
2492 int subpage_memory;
2493
2494 mmio = qemu_mallocz(sizeof(subpage_t));
2495 if (mmio != NULL) {
2496 mmio->base = base;
2497 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2498#if defined(DEBUG_SUBPAGE)
2499 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2500 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2501#endif
2502 *phys = subpage_memory | IO_MEM_SUBPAGE;
2503 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2504 }
2505
2506 return mmio;
2507}
2508
33417e70
FB
2509static void io_mem_init(void)
2510{
3a7d929e 2511 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2512 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2513 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2514 io_mem_nb = 5;
2515
6658ffb8
PB
2516#if defined(CONFIG_SOFTMMU)
2517 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2518 watch_mem_write, NULL);
2519#endif
1ccde1cb 2520 /* alloc dirty bits array */
0a962c02 2521 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2522 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2523}
2524
2525/* mem_read and mem_write are arrays of functions containing the
2526 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2527 2). Functions can be omitted with a NULL function pointer. The
2528 registered functions may be modified dynamically later.
2529 If io_index is positive, the corresponding io zone is
4254fab8
BS
2530 modified. If it is zero or negative, a new io zone is allocated.
2531 The return value can be used with cpu_register_physical_memory().
2532 (-1) is returned on error. */
33417e70
FB
2533int cpu_register_io_memory(int io_index,
2534 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2535 CPUWriteMemoryFunc **mem_write,
2536 void *opaque)
33417e70 2537{
4254fab8 2538 int i, subwidth = 0;
33417e70
FB
2539
2540 if (io_index <= 0) {
b5ff1b31 2541 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2542 return -1;
2543 io_index = io_mem_nb++;
2544 } else {
2545 if (io_index >= IO_MEM_NB_ENTRIES)
2546 return -1;
2547 }
b5ff1b31 2548
33417e70 2549 for(i = 0;i < 3; i++) {
4254fab8
BS
2550 if (!mem_read[i] || !mem_write[i])
2551 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2552 io_mem_read[io_index][i] = mem_read[i];
2553 io_mem_write[io_index][i] = mem_write[i];
2554 }
a4193c8a 2555 io_mem_opaque[io_index] = opaque;
4254fab8 2556 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2557}
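/* Illustrative sketch (not part of exec.c): registering a trivial
   32-bit-only device. The handler names and the register contents are
   assumptions. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                   /* one 32-bit status register */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* writes ignored */
}

/* NULL byte/word slots make cpu_register_io_memory() tag the region
   with IO_MEM_SUBWIDTH, so narrow accesses are routed via subpages */
static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(target_phys_addr_t base)
{
    int io_index = cpu_register_io_memory(0, example_dev_read,
                                          example_dev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}
#endif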
61382a50 2558
8926b517
FB
2559CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2560{
2561 return io_mem_write[io_index >> IO_MEM_SHIFT];
2562}
2563
2564CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2565{
2566 return io_mem_read[io_index >> IO_MEM_SHIFT];
2567}
2568
13eb76e0
FB
2569/* physical memory access (slow version, mainly for debug) */
2570#if defined(CONFIG_USER_ONLY)
5fafdf24 2571void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2572 int len, int is_write)
2573{
2574 int l, flags;
2575 target_ulong page;
53a5960a 2576 void * p;
13eb76e0
FB
2577
2578 while (len > 0) {
2579 page = addr & TARGET_PAGE_MASK;
2580 l = (page + TARGET_PAGE_SIZE) - addr;
2581 if (l > len)
2582 l = len;
2583 flags = page_get_flags(page);
2584 if (!(flags & PAGE_VALID))
2585 return;
2586 if (is_write) {
2587 if (!(flags & PAGE_WRITE))
2588 return;
579a97f7 2589 /* XXX: this code should not depend on lock_user */
72fb7daa 2590 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2591 /* FIXME - should this return an error rather than just fail? */
2592 return;
72fb7daa
AJ
2593 memcpy(p, buf, l);
2594 unlock_user(p, addr, l);
13eb76e0
FB
2595 } else {
2596 if (!(flags & PAGE_READ))
2597 return;
579a97f7 2598 /* XXX: this code should not depend on lock_user */
72fb7daa 2599 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2600 /* FIXME - should this return an error rather than just fail? */
2601 return;
72fb7daa 2602 memcpy(buf, p, l);
5b257578 2603 unlock_user(p, addr, 0);
13eb76e0
FB
2604 }
2605 len -= l;
2606 buf += l;
2607 addr += l;
2608 }
2609}
8df1cd07 2610
13eb76e0 2611#else
5fafdf24 2612void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2613 int len, int is_write)
2614{
2615 int l, io_index;
2616 uint8_t *ptr;
2617 uint32_t val;
2e12669a
FB
2618 target_phys_addr_t page;
2619 unsigned long pd;
92e873b9 2620 PhysPageDesc *p;
3b46e624 2621
13eb76e0
FB
2622 while (len > 0) {
2623 page = addr & TARGET_PAGE_MASK;
2624 l = (page + TARGET_PAGE_SIZE) - addr;
2625 if (l > len)
2626 l = len;
92e873b9 2627 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2628 if (!p) {
2629 pd = IO_MEM_UNASSIGNED;
2630 } else {
2631 pd = p->phys_offset;
2632 }
3b46e624 2633
13eb76e0 2634 if (is_write) {
3a7d929e 2635 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2636 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2637 /* XXX: could force cpu_single_env to NULL to avoid
2638 potential bugs */
13eb76e0 2639 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2640 /* 32 bit write access */
c27004ec 2641 val = ldl_p(buf);
a4193c8a 2642 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2643 l = 4;
2644 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2645 /* 16 bit write access */
c27004ec 2646 val = lduw_p(buf);
a4193c8a 2647 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2648 l = 2;
2649 } else {
1c213d19 2650 /* 8 bit write access */
c27004ec 2651 val = ldub_p(buf);
a4193c8a 2652 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2653 l = 1;
2654 }
2655 } else {
b448f2f3
FB
2656 unsigned long addr1;
2657 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2658 /* RAM case */
b448f2f3 2659 ptr = phys_ram_base + addr1;
13eb76e0 2660 memcpy(ptr, buf, l);
3a7d929e
FB
2661 if (!cpu_physical_memory_is_dirty(addr1)) {
2662 /* invalidate code */
2663 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2664 /* set dirty bit */
5fafdf24 2665 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2666 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2667 }
13eb76e0
FB
2668 }
2669 } else {
5fafdf24 2670 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2671 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2672 /* I/O case */
2673 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2674 if (l >= 4 && ((addr & 3) == 0)) {
2675 /* 32 bit read access */
a4193c8a 2676 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2677 stl_p(buf, val);
13eb76e0
FB
2678 l = 4;
2679 } else if (l >= 2 && ((addr & 1) == 0)) {
2680 /* 16 bit read access */
a4193c8a 2681 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2682 stw_p(buf, val);
13eb76e0
FB
2683 l = 2;
2684 } else {
1c213d19 2685 /* 8 bit read access */
a4193c8a 2686 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2687 stb_p(buf, val);
13eb76e0
FB
2688 l = 1;
2689 }
2690 } else {
2691 /* RAM case */
5fafdf24 2692 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2693 (addr & ~TARGET_PAGE_MASK);
2694 memcpy(buf, ptr, l);
2695 }
2696 }
2697 len -= l;
2698 buf += l;
2699 addr += l;
2700 }
2701}
8df1cd07 2702
d0ecd2aa 2703/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2704void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2705 const uint8_t *buf, int len)
2706{
2707 int l;
2708 uint8_t *ptr;
2709 target_phys_addr_t page;
2710 unsigned long pd;
2711 PhysPageDesc *p;
3b46e624 2712
d0ecd2aa
FB
2713 while (len > 0) {
2714 page = addr & TARGET_PAGE_MASK;
2715 l = (page + TARGET_PAGE_SIZE) - addr;
2716 if (l > len)
2717 l = len;
2718 p = phys_page_find(page >> TARGET_PAGE_BITS);
2719 if (!p) {
2720 pd = IO_MEM_UNASSIGNED;
2721 } else {
2722 pd = p->phys_offset;
2723 }
3b46e624 2724
d0ecd2aa 2725 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2726 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2727 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2728 /* do nothing */
2729 } else {
2730 unsigned long addr1;
2731 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2732 /* ROM/RAM case */
2733 ptr = phys_ram_base + addr1;
2734 memcpy(ptr, buf, l);
2735 }
2736 len -= l;
2737 buf += l;
2738 addr += l;
2739 }
2740}
2741
2742
8df1cd07
FB
2743/* warning: addr must be aligned */
2744uint32_t ldl_phys(target_phys_addr_t addr)
2745{
2746 int io_index;
2747 uint8_t *ptr;
2748 uint32_t val;
2749 unsigned long pd;
2750 PhysPageDesc *p;
2751
2752 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2753 if (!p) {
2754 pd = IO_MEM_UNASSIGNED;
2755 } else {
2756 pd = p->phys_offset;
2757 }
3b46e624 2758
5fafdf24 2759 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2760 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2761 /* I/O case */
2762 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2763 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2764 } else {
2765 /* RAM case */
5fafdf24 2766 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2767 (addr & ~TARGET_PAGE_MASK);
2768 val = ldl_p(ptr);
2769 }
2770 return val;
2771}
2772
84b7b8e7
FB
2773/* warning: addr must be aligned */
2774uint64_t ldq_phys(target_phys_addr_t addr)
2775{
2776 int io_index;
2777 uint8_t *ptr;
2778 uint64_t val;
2779 unsigned long pd;
2780 PhysPageDesc *p;
2781
2782 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2783 if (!p) {
2784 pd = IO_MEM_UNASSIGNED;
2785 } else {
2786 pd = p->phys_offset;
2787 }
3b46e624 2788
2a4188a3
FB
2789 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2790 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2791 /* I/O case */
2792 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2793#ifdef TARGET_WORDS_BIGENDIAN
2794 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2795 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2796#else
2797 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2798 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2799#endif
2800 } else {
2801 /* RAM case */
5fafdf24 2802 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2803 (addr & ~TARGET_PAGE_MASK);
2804 val = ldq_p(ptr);
2805 }
2806 return val;
2807}
2808
aab33094
FB
2809/* XXX: optimize */
2810uint32_t ldub_phys(target_phys_addr_t addr)
2811{
2812 uint8_t val;
2813 cpu_physical_memory_read(addr, &val, 1);
2814 return val;
2815}
2816
2817/* XXX: optimize */
2818uint32_t lduw_phys(target_phys_addr_t addr)
2819{
2820 uint16_t val;
2821 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2822 return tswap16(val);
2823}
2824
8df1cd07
FB
2825/* warning: addr must be aligned. The ram page is not marked as dirty
2826 and the code inside is not invalidated. It is useful if the dirty
2827 bits are used to track modified PTEs */
2828void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2829{
2830 int io_index;
2831 uint8_t *ptr;
2832 unsigned long pd;
2833 PhysPageDesc *p;
2834
2835 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2836 if (!p) {
2837 pd = IO_MEM_UNASSIGNED;
2838 } else {
2839 pd = p->phys_offset;
2840 }
3b46e624 2841
3a7d929e 2842 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2843 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2844 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2845 } else {
5fafdf24 2846 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2847 (addr & ~TARGET_PAGE_MASK);
2848 stl_p(ptr, val);
2849 }
2850}
2851
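/* Illustrative sketch (not part of exec.c): target MMU code updating a
   guest page-table entry. Using the _notdirty variant avoids marking
   the page-table page dirty, which matters when the dirty bits are
   themselves used to track modified PTEs. The accessed-bit position is
   a hypothetical example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {                  /* hypothetical accessed bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif
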
bc98a7ef
JM
2852void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2853{
2854 int io_index;
2855 uint8_t *ptr;
2856 unsigned long pd;
2857 PhysPageDesc *p;
2858
2859 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2860 if (!p) {
2861 pd = IO_MEM_UNASSIGNED;
2862 } else {
2863 pd = p->phys_offset;
2864 }
3b46e624 2865
bc98a7ef
JM
2866 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2867 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2868#ifdef TARGET_WORDS_BIGENDIAN
2869 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2870 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2871#else
2872 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2873 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2874#endif
2875 } else {
5fafdf24 2876 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2877 (addr & ~TARGET_PAGE_MASK);
2878 stq_p(ptr, val);
2879 }
2880}
2881
8df1cd07 2882/* warning: addr must be aligned */
8df1cd07
FB
2883void stl_phys(target_phys_addr_t addr, uint32_t val)
2884{
2885 int io_index;
2886 uint8_t *ptr;
2887 unsigned long pd;
2888 PhysPageDesc *p;
2889
2890 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2891 if (!p) {
2892 pd = IO_MEM_UNASSIGNED;
2893 } else {
2894 pd = p->phys_offset;
2895 }
3b46e624 2896
3a7d929e 2897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2899 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2900 } else {
2901 unsigned long addr1;
2902 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2903 /* RAM case */
2904 ptr = phys_ram_base + addr1;
2905 stl_p(ptr, val);
3a7d929e
FB
2906 if (!cpu_physical_memory_is_dirty(addr1)) {
2907 /* invalidate code */
2908 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2909 /* set dirty bit */
f23db169
FB
2910 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2911 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2912 }
8df1cd07
FB
2913 }
2914}
2915
aab33094
FB
2916/* XXX: optimize */
2917void stb_phys(target_phys_addr_t addr, uint32_t val)
2918{
2919 uint8_t v = val;
2920 cpu_physical_memory_write(addr, &v, 1);
2921}
2922
2923/* XXX: optimize */
2924void stw_phys(target_phys_addr_t addr, uint32_t val)
2925{
2926 uint16_t v = tswap16(val);
2927 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2928}
2929
2930/* XXX: optimize */
2931void stq_phys(target_phys_addr_t addr, uint64_t val)
2932{
2933 val = tswap64(val);
2934 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2935}
2936
13eb76e0
FB
2937#endif
2938
2939/* virtual memory access for debug */
5fafdf24 2940int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2941 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2942{
2943 int l;
9b3c35e0
JM
2944 target_phys_addr_t phys_addr;
2945 target_ulong page;
13eb76e0
FB
2946
2947 while (len > 0) {
2948 page = addr & TARGET_PAGE_MASK;
2949 phys_addr = cpu_get_phys_page_debug(env, page);
2950 /* if no physical page mapped, return an error */
2951 if (phys_addr == -1)
2952 return -1;
2953 l = (page + TARGET_PAGE_SIZE) - addr;
2954 if (l > len)
2955 l = len;
5fafdf24 2956 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2957 buf, l, is_write);
13eb76e0
FB
2958 len -= l;
2959 buf += l;
2960 addr += l;
2961 }
2962 return 0;
2963}
2964
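/* Illustrative sketch (not part of exec.c): a debugger stub reading
   guest memory through the CPU's current address translation. */
#if 0
static int example_debug_read(CPUState *env, target_ulong addr,
                              uint8_t *buf, int len)
{
    /* returns -1 if any page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, addr, buf, len, 0);
}
#endif
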
e3db7226
FB
2965void dump_exec_info(FILE *f,
2966 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2967{
2968 int i, target_code_size, max_target_code_size;
2969 int direct_jmp_count, direct_jmp2_count, cross_page;
2970 TranslationBlock *tb;
3b46e624 2971
e3db7226
FB
2972 target_code_size = 0;
2973 max_target_code_size = 0;
2974 cross_page = 0;
2975 direct_jmp_count = 0;
2976 direct_jmp2_count = 0;
2977 for(i = 0; i < nb_tbs; i++) {
2978 tb = &tbs[i];
2979 target_code_size += tb->size;
2980 if (tb->size > max_target_code_size)
2981 max_target_code_size = tb->size;
2982 if (tb->page_addr[1] != -1)
2983 cross_page++;
2984 if (tb->tb_next_offset[0] != 0xffff) {
2985 direct_jmp_count++;
2986 if (tb->tb_next_offset[1] != 0xffff) {
2987 direct_jmp2_count++;
2988 }
2989 }
2990 }
2991 /* XXX: avoid using doubles ? */
57fec1fe 2992 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2993 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2994 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2995 nb_tbs ? target_code_size / nb_tbs : 0,
2996 max_target_code_size);
5fafdf24 2997 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2998 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2999 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3000 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3001 cross_page,
e3db7226
FB
3002 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3003 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3004 direct_jmp_count,
e3db7226
FB
3005 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3006 direct_jmp2_count,
3007 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3008 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3009 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3010 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3011 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3012 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3013}
3014
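/* Illustrative usage (assumption: this is what a monitor "info jit"
   style command does): fprintf matches the cpu_fprintf callback
   signature, so the statistics can go straight to a stdio stream. */
#if 0
dump_exec_info(stderr, fprintf);
#endif
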
5fafdf24 3015#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3016
3017#define MMUSUFFIX _cmmu
3018#define GETPC() NULL
3019#define env cpu_single_env
b769d8fe 3020#define SOFTMMU_CODE_ACCESS
61382a50
FB
3021
3022#define SHIFT 0
3023#include "softmmu_template.h"
3024
3025#define SHIFT 1
3026#include "softmmu_template.h"
3027
3028#define SHIFT 2
3029#include "softmmu_template.h"
3030
3031#define SHIFT 3
3032#include "softmmu_template.h"
3033
3034#undef env
3035
3036#endif