54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
40#endif
54936004 41
fd6ce8f6 42//#define DEBUG_TB_INVALIDATE
66e85a21 43//#define DEBUG_FLUSH
9fa3e853 44//#define DEBUG_TLB
67d3b957 45//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
46
47/* make various TB consistency checks */
5fafdf24
TS
48//#define DEBUG_TB_CHECK
49//#define DEBUG_TLB_CHECK
fd6ce8f6 50
1196be37 51//#define DEBUG_IOPORT
db7b5426 52//#define DEBUG_SUBPAGE
1196be37 53
99773bd4
PB
54#if !defined(CONFIG_USER_ONLY)
55/* TB consistency checks only implemented for usermode emulation. */
56#undef DEBUG_TB_CHECK
57#endif
58
fd6ce8f6 59/* threshold to flush the translated code buffer */
d07bde88 60#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
fd6ce8f6 61
9fa3e853
FB
62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
108c49b8
FB
67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
fd6ce8f6 81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 83int nb_tbs;
eb51d102
FB
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 86
b8076a74 87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
88uint8_t *code_gen_ptr;
89
9fa3e853
FB
90int phys_ram_size;
91int phys_ram_fd;
92uint8_t *phys_ram_base;
1ccde1cb 93uint8_t *phys_ram_dirty;
e9a1ab19 94static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 95
6a00d601
FB
96CPUState *first_cpu;
97/* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
5fafdf24 99CPUState *cpu_single_env;
6a00d601 100
54936004 101typedef struct PageDesc {
92e873b9 102 /* list of TBs intersecting this ram page */
fd6ce8f6 103 TranslationBlock *first_tb;
9fa3e853
FB
104 /* in order to optimize self modifying code, we count the number
105 of lookups we do to a given page to use a bitmap */
106 unsigned int code_write_count;
107 uint8_t *code_bitmap;
108#if defined(CONFIG_USER_ONLY)
109 unsigned long flags;
110#endif
54936004
FB
111} PageDesc;
112
92e873b9
FB
113typedef struct PhysPageDesc {
114 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 115 uint32_t phys_offset;
92e873b9
FB
116} PhysPageDesc;
117
54936004 118#define L2_BITS 10
bedb69ea
JM
119#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120/* XXX: this is a temporary hack for the alpha target.
121 * In the future, this is to be replaced by a multi-level table
122 * to actually be able to handle the complete 64-bit address space.
123 */
124#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125#else
54936004 126#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 127#endif
54936004
FB
128
129#define L1_SIZE (1 << L1_BITS)
130#define L2_SIZE (1 << L2_BITS)
131
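/* A target address is looked up in l1_map in two steps (see page_find()
 * below):
 *   l1_map[(addr >> TARGET_PAGE_BITS) >> L2_BITS]  selects a block of
 *   L2_SIZE PageDesc entries, and
 *   (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1)     selects the entry
 * inside that block. For example, with 4 KB target pages and L2_BITS
 * of 10 this is a 10/10/12 bit split of a 32 bit virtual address. */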
33417e70 132static void io_mem_init(void);
fd6ce8f6 133
83fb7adf
FB
134unsigned long qemu_real_host_page_size;
135unsigned long qemu_host_page_bits;
136unsigned long qemu_host_page_size;
137unsigned long qemu_host_page_mask;
54936004 138
92e873b9 139/* XXX: for system emulation, it could just be an array */
54936004 140static PageDesc *l1_map[L1_SIZE];
0a962c02 141PhysPageDesc **l1_phys_map;
54936004 142
33417e70 143/* io memory support */
33417e70
FB
144CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 146void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 147static int io_mem_nb;
6658ffb8
PB
148#if defined(CONFIG_SOFTMMU)
149static int io_mem_watch;
150#endif
33417e70 151
34865134
FB
152/* log support */
153char *logfilename = "/tmp/qemu.log";
154FILE *logfile;
155int loglevel;
e735b91c 156static int log_append = 0;
34865134 157
e3db7226
FB
158/* statistics */
159static int tlb_flush_count;
160static int tb_flush_count;
161static int tb_phys_invalidate_count;
162
db7b5426
BS
163#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164typedef struct subpage_t {
165 target_phys_addr_t base;
3ee89922
BS
166 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
169} subpage_t;
170
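/* A subpage splits a single target page into regions with different I/O
 * handlers: for every byte offset within the page and for each of the
 * four access sizes, mem_read/mem_write point to the handler table to
 * use and opaque[] holds the matching opaque pointers. SUBPAGE_IDX()
 * extracts that byte offset from an address. */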
b346ff46 171static void page_init(void)
54936004 172{
83fb7adf 173 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 174 TARGET_PAGE_SIZE */
67b915a5 175#ifdef _WIN32
d5a8f07c
FB
176 {
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
3b46e624 179
d5a8f07c
FB
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
3b46e624 182
d5a8f07c
FB
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
185 }
67b915a5 186#else
83fb7adf 187 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
188 {
189 unsigned long start, end;
190
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
3b46e624 193
d5a8f07c
FB
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
3b46e624 197
5fafdf24 198 mprotect((void *)start, end - start,
d5a8f07c
FB
199 PROT_READ | PROT_WRITE | PROT_EXEC);
200 }
67b915a5 201#endif
d5a8f07c 202
83fb7adf
FB
203 if (qemu_host_page_size == 0)
204 qemu_host_page_size = qemu_real_host_page_size;
205 if (qemu_host_page_size < TARGET_PAGE_SIZE)
206 qemu_host_page_size = TARGET_PAGE_SIZE;
207 qemu_host_page_bits = 0;
208 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209 qemu_host_page_bits++;
210 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
211 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
213
214#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
215 {
216 long long startaddr, endaddr;
217 FILE *f;
218 int n;
219
220 f = fopen("/proc/self/maps", "r");
221 if (f) {
222 do {
223 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224 if (n == 2) {
225 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226 TARGET_PAGE_ALIGN(endaddr),
227 PAGE_RESERVED);
228 }
229 } while (!feof(f));
230 fclose(f);
231 }
232 }
233#endif
54936004
FB
234}
235
fd6ce8f6 236static inline PageDesc *page_find_alloc(unsigned int index)
54936004 237{
54936004
FB
238 PageDesc **lp, *p;
239
54936004
FB
240 lp = &l1_map[index >> L2_BITS];
241 p = *lp;
242 if (!p) {
243 /* allocate if not found */
59817ccb 244 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 245 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
246 *lp = p;
247 }
248 return p + (index & (L2_SIZE - 1));
249}
250
fd6ce8f6 251static inline PageDesc *page_find(unsigned int index)
54936004 252{
54936004
FB
253 PageDesc *p;
254
54936004
FB
255 p = l1_map[index >> L2_BITS];
256 if (!p)
257 return 0;
fd6ce8f6
FB
258 return p + (index & (L2_SIZE - 1));
259}
260
108c49b8 261static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 262{
108c49b8 263 void **lp, **p;
e3f4e2a4 264 PhysPageDesc *pd;
92e873b9 265
108c49b8
FB
266 p = (void **)l1_phys_map;
267#if TARGET_PHYS_ADDR_SPACE_BITS > 32
268
269#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271#endif
272 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
108c49b8
FB
276 if (!alloc)
277 return NULL;
278 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279 memset(p, 0, sizeof(void *) * L1_SIZE);
280 *lp = p;
281 }
282#endif
283 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
284 pd = *lp;
285 if (!pd) {
286 int i;
108c49b8
FB
287 /* allocate if not found */
288 if (!alloc)
289 return NULL;
e3f4e2a4
PB
290 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291 *lp = pd;
292 for (i = 0; i < L2_SIZE; i++)
293 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 294 }
e3f4e2a4 295 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
296}
297
108c49b8 298static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 299{
108c49b8 300 return phys_page_find_alloc(index, 0);
92e873b9
FB
301}
302
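/* Worked example (in pseudo-code) of the lookup above, assuming
 * TARGET_PHYS_ADDR_SPACE_BITS > 32 so that both levels of l1_phys_map
 * are in use:
 *
 *   index = paddr >> TARGET_PAGE_BITS;
 *   lp    = l1_phys_map[(index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)];
 *   pd    = lp[(index >> L2_BITS) & (L1_SIZE - 1)];
 *   desc  = &pd[index & (L2_SIZE - 1)];
 *
 * desc->phys_offset then holds the RAM offset of the page, or an io_mem
 * index in its low bits for I/O pages (IO_MEM_UNASSIGNED if the page
 * was never registered). */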
9fa3e853 303#if !defined(CONFIG_USER_ONLY)
6a00d601 304static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 305static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 306 target_ulong vaddr);
9fa3e853 307#endif
fd6ce8f6 308
6a00d601 309void cpu_exec_init(CPUState *env)
fd6ce8f6 310{
6a00d601
FB
311 CPUState **penv;
312 int cpu_index;
313
fd6ce8f6
FB
314 if (!code_gen_ptr) {
315 code_gen_ptr = code_gen_buffer;
b346ff46 316 page_init();
33417e70 317 io_mem_init();
fd6ce8f6 318 }
6a00d601
FB
319 env->next_cpu = NULL;
320 penv = &first_cpu;
321 cpu_index = 0;
322 while (*penv != NULL) {
323 penv = (CPUState **)&(*penv)->next_cpu;
324 cpu_index++;
325 }
326 env->cpu_index = cpu_index;
6658ffb8 327 env->nb_watchpoints = 0;
6a00d601 328 *penv = env;
fd6ce8f6
FB
329}
330
9fa3e853
FB
331static inline void invalidate_page_bitmap(PageDesc *p)
332{
333 if (p->code_bitmap) {
59817ccb 334 qemu_free(p->code_bitmap);
9fa3e853
FB
335 p->code_bitmap = NULL;
336 }
337 p->code_write_count = 0;
338}
339
fd6ce8f6
FB
340/* set to NULL all the 'first_tb' fields in all PageDescs */
341static void page_flush_tb(void)
342{
343 int i, j;
344 PageDesc *p;
345
346 for(i = 0; i < L1_SIZE; i++) {
347 p = l1_map[i];
348 if (p) {
9fa3e853
FB
349 for(j = 0; j < L2_SIZE; j++) {
350 p->first_tb = NULL;
351 invalidate_page_bitmap(p);
352 p++;
353 }
fd6ce8f6
FB
354 }
355 }
356}
357
358/* flush all the translation blocks */
d4e8164f 359/* XXX: tb_flush is currently not thread safe */
6a00d601 360void tb_flush(CPUState *env1)
fd6ce8f6 361{
6a00d601 362 CPUState *env;
0124311e 363#if defined(DEBUG_FLUSH)
ab3d1727
BS
364 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
365 (unsigned long)(code_gen_ptr - code_gen_buffer),
366 nb_tbs, nb_tbs > 0 ?
367 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6
FB
368#endif
369 nb_tbs = 0;
3b46e624 370
6a00d601
FB
371 for(env = first_cpu; env != NULL; env = env->next_cpu) {
372 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
373 }
9fa3e853 374
8a8a608f 375 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 376 page_flush_tb();
9fa3e853 377
fd6ce8f6 378 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
379 /* XXX: flush processor icache at this point if cache flush is
380 expensive */
e3db7226 381 tb_flush_count++;
fd6ce8f6
FB
382}
383
384#ifdef DEBUG_TB_CHECK
385
bc98a7ef 386static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
387{
388 TranslationBlock *tb;
389 int i;
390 address &= TARGET_PAGE_MASK;
99773bd4
PB
391 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
392 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
393 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
394 address >= tb->pc + tb->size)) {
395 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 396 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
397 }
398 }
399 }
400}
401
402/* verify that all the pages have correct rights for code */
403static void tb_page_check(void)
404{
405 TranslationBlock *tb;
406 int i, flags1, flags2;
3b46e624 407
99773bd4
PB
408 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
409 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
410 flags1 = page_get_flags(tb->pc);
411 flags2 = page_get_flags(tb->pc + tb->size - 1);
412 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
413 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 414 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
415 }
416 }
417 }
418}
419
d4e8164f
FB
420void tb_jmp_check(TranslationBlock *tb)
421{
422 TranslationBlock *tb1;
423 unsigned int n1;
424
425 /* suppress any remaining jumps to this TB */
426 tb1 = tb->jmp_first;
427 for(;;) {
428 n1 = (long)tb1 & 3;
429 tb1 = (TranslationBlock *)((long)tb1 & ~3);
430 if (n1 == 2)
431 break;
432 tb1 = tb1->jmp_next[n1];
433 }
434 /* check end of list */
435 if (tb1 != tb) {
436 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
437 }
438}
439
fd6ce8f6
FB
440#endif
441
442/* invalidate one TB */
443static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
444 int next_offset)
445{
446 TranslationBlock *tb1;
447 for(;;) {
448 tb1 = *ptb;
449 if (tb1 == tb) {
450 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
451 break;
452 }
453 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
454 }
455}
456
9fa3e853
FB
457static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
458{
459 TranslationBlock *tb1;
460 unsigned int n1;
461
462 for(;;) {
463 tb1 = *ptb;
464 n1 = (long)tb1 & 3;
465 tb1 = (TranslationBlock *)((long)tb1 & ~3);
466 if (tb1 == tb) {
467 *ptb = tb1->page_next[n1];
468 break;
469 }
470 ptb = &tb1->page_next[n1];
471 }
472}
473
d4e8164f
FB
474static inline void tb_jmp_remove(TranslationBlock *tb, int n)
475{
476 TranslationBlock *tb1, **ptb;
477 unsigned int n1;
478
479 ptb = &tb->jmp_next[n];
480 tb1 = *ptb;
481 if (tb1) {
482 /* find tb(n) in circular list */
483 for(;;) {
484 tb1 = *ptb;
485 n1 = (long)tb1 & 3;
486 tb1 = (TranslationBlock *)((long)tb1 & ~3);
487 if (n1 == n && tb1 == tb)
488 break;
489 if (n1 == 2) {
490 ptb = &tb1->jmp_first;
491 } else {
492 ptb = &tb1->jmp_next[n1];
493 }
494 }
495 /* now we can suppress tb(n) from the list */
496 *ptb = tb->jmp_next[n];
497
498 tb->jmp_next[n] = NULL;
499 }
500}
501
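/* The jump lists use tagged pointers: the low two bits of every
 * TranslationBlock pointer stored in jmp_first/jmp_next encode which
 * outgoing jump slot (0 or 1) of that TB points back here, and the
 * value 2 marks the list head (the TB itself). This is why the loops
 * above mask with ~3 and stop when ((long)tb1 & 3) == 2. */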
502/* reset the jump entry 'n' of a TB so that it is not chained to
503 another TB */
504static inline void tb_reset_jump(TranslationBlock *tb, int n)
505{
506 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
507}
508
8a40a180 509static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 510{
6a00d601 511 CPUState *env;
8a40a180 512 PageDesc *p;
d4e8164f 513 unsigned int h, n1;
8a40a180
FB
514 target_ulong phys_pc;
515 TranslationBlock *tb1, *tb2;
3b46e624 516
8a40a180
FB
517 /* remove the TB from the hash list */
518 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
519 h = tb_phys_hash_func(phys_pc);
5fafdf24 520 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
521 offsetof(TranslationBlock, phys_hash_next));
522
523 /* remove the TB from the page list */
524 if (tb->page_addr[0] != page_addr) {
525 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
526 tb_page_remove(&p->first_tb, tb);
527 invalidate_page_bitmap(p);
528 }
529 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
530 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
531 tb_page_remove(&p->first_tb, tb);
532 invalidate_page_bitmap(p);
533 }
534
36bdbe54 535 tb_invalidated_flag = 1;
59817ccb 536
fd6ce8f6 537 /* remove the TB from the hash list */
8a40a180 538 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
539 for(env = first_cpu; env != NULL; env = env->next_cpu) {
540 if (env->tb_jmp_cache[h] == tb)
541 env->tb_jmp_cache[h] = NULL;
542 }
d4e8164f
FB
543
544 /* suppress this TB from the two jump lists */
545 tb_jmp_remove(tb, 0);
546 tb_jmp_remove(tb, 1);
547
548 /* suppress any remaining jumps to this TB */
549 tb1 = tb->jmp_first;
550 for(;;) {
551 n1 = (long)tb1 & 3;
552 if (n1 == 2)
553 break;
554 tb1 = (TranslationBlock *)((long)tb1 & ~3);
555 tb2 = tb1->jmp_next[n1];
556 tb_reset_jump(tb1, n1);
557 tb1->jmp_next[n1] = NULL;
558 tb1 = tb2;
559 }
560 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 561
e3db7226 562 tb_phys_invalidate_count++;
9fa3e853
FB
563}
564
565static inline void set_bits(uint8_t *tab, int start, int len)
566{
567 int end, mask, end1;
568
569 end = start + len;
570 tab += start >> 3;
571 mask = 0xff << (start & 7);
572 if ((start & ~7) == (end & ~7)) {
573 if (start < end) {
574 mask &= ~(0xff << (end & 7));
575 *tab |= mask;
576 }
577 } else {
578 *tab++ |= mask;
579 start = (start + 8) & ~7;
580 end1 = end & ~7;
581 while (start < end1) {
582 *tab++ = 0xff;
583 start += 8;
584 }
585 if (start < end) {
586 mask = ~(0xff << (end & 7));
587 *tab |= mask;
588 }
589 }
590}
591
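/* Example: set_bits(bitmap, 3, 6) marks the half-open bit range [3, 9):
 * the first byte is ORed with 0xf8 (bits 3..7) and the second byte with
 * 0x01 (bit 8). */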
592static void build_page_bitmap(PageDesc *p)
593{
594 int n, tb_start, tb_end;
595 TranslationBlock *tb;
3b46e624 596
59817ccb 597 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
598 if (!p->code_bitmap)
599 return;
600 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
601
602 tb = p->first_tb;
603 while (tb != NULL) {
604 n = (long)tb & 3;
605 tb = (TranslationBlock *)((long)tb & ~3);
606 /* NOTE: this is subtle as a TB may span two physical pages */
607 if (n == 0) {
608 /* NOTE: tb_end may be after the end of the page, but
609 it is not a problem */
610 tb_start = tb->pc & ~TARGET_PAGE_MASK;
611 tb_end = tb_start + tb->size;
612 if (tb_end > TARGET_PAGE_SIZE)
613 tb_end = TARGET_PAGE_SIZE;
614 } else {
615 tb_start = 0;
616 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
617 }
618 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
619 tb = tb->page_next[n];
620 }
621}
622
d720b93d
FB
623#ifdef TARGET_HAS_PRECISE_SMC
624
5fafdf24 625static void tb_gen_code(CPUState *env,
d720b93d
FB
626 target_ulong pc, target_ulong cs_base, int flags,
627 int cflags)
628{
629 TranslationBlock *tb;
630 uint8_t *tc_ptr;
631 target_ulong phys_pc, phys_page2, virt_page2;
632 int code_gen_size;
633
c27004ec
FB
634 phys_pc = get_phys_addr_code(env, pc);
635 tb = tb_alloc(pc);
d720b93d
FB
636 if (!tb) {
637 /* flush must be done */
638 tb_flush(env);
639 /* cannot fail at this point */
c27004ec 640 tb = tb_alloc(pc);
d720b93d
FB
641 }
642 tc_ptr = code_gen_ptr;
643 tb->tc_ptr = tc_ptr;
644 tb->cs_base = cs_base;
645 tb->flags = flags;
646 tb->cflags = cflags;
d07bde88 647 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 648 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 649
d720b93d 650 /* check next page if needed */
c27004ec 651 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 652 phys_page2 = -1;
c27004ec 653 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
654 phys_page2 = get_phys_addr_code(env, virt_page2);
655 }
656 tb_link_phys(tb, phys_pc, phys_page2);
657}
658#endif
3b46e624 659
9fa3e853
FB
660/* invalidate all TBs which intersect with the target physical page
661 starting in range [start, end[. NOTE: start and end must refer to
d720b93d
FB
662 the same physical page. 'is_cpu_write_access' should be true if called
663 from a real cpu write access: the virtual CPU will exit the current
664 TB if code is modified inside this TB. */
5fafdf24 665void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
d720b93d
FB
666 int is_cpu_write_access)
667{
668 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 669 CPUState *env = cpu_single_env;
9fa3e853 670 PageDesc *p;
ea1c1802 671 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 672 target_ulong tb_start, tb_end;
d720b93d 673 target_ulong current_pc, current_cs_base;
9fa3e853
FB
674
675 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 676 if (!p)
9fa3e853 677 return;
5fafdf24 678 if (!p->code_bitmap &&
d720b93d
FB
679 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
680 is_cpu_write_access) {
9fa3e853
FB
681 /* build code bitmap */
682 build_page_bitmap(p);
683 }
684
685 /* we remove all the TBs in the range [start, end[ */
686 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
687 current_tb_not_found = is_cpu_write_access;
688 current_tb_modified = 0;
689 current_tb = NULL; /* avoid warning */
690 current_pc = 0; /* avoid warning */
691 current_cs_base = 0; /* avoid warning */
692 current_flags = 0; /* avoid warning */
9fa3e853
FB
693 tb = p->first_tb;
694 while (tb != NULL) {
695 n = (long)tb & 3;
696 tb = (TranslationBlock *)((long)tb & ~3);
697 tb_next = tb->page_next[n];
698 /* NOTE: this is subtle as a TB may span two physical pages */
699 if (n == 0) {
700 /* NOTE: tb_end may be after the end of the page, but
701 it is not a problem */
702 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
703 tb_end = tb_start + tb->size;
704 } else {
705 tb_start = tb->page_addr[1];
706 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
707 }
708 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
709#ifdef TARGET_HAS_PRECISE_SMC
710 if (current_tb_not_found) {
711 current_tb_not_found = 0;
712 current_tb = NULL;
713 if (env->mem_write_pc) {
714 /* now we have a real cpu fault */
715 current_tb = tb_find_pc(env->mem_write_pc);
716 }
717 }
718 if (current_tb == tb &&
719 !(current_tb->cflags & CF_SINGLE_INSN)) {
720 /* If we are modifying the current TB, we must stop
721 its execution. We could be more precise by checking
722 that the modification is after the current PC, but it
723 would require a specialized function to partially
724 restore the CPU state */
3b46e624 725
d720b93d 726 current_tb_modified = 1;
5fafdf24 727 cpu_restore_state(current_tb, env,
d720b93d
FB
728 env->mem_write_pc, NULL);
729#if defined(TARGET_I386)
730 current_flags = env->hflags;
731 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
732 current_cs_base = (target_ulong)env->segs[R_CS].base;
733 current_pc = current_cs_base + env->eip;
734#else
735#error unsupported CPU
736#endif
737 }
738#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
739 /* we need to do that to handle the case where a signal
740 occurs while doing tb_phys_invalidate() */
741 saved_tb = NULL;
742 if (env) {
743 saved_tb = env->current_tb;
744 env->current_tb = NULL;
745 }
9fa3e853 746 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
747 if (env) {
748 env->current_tb = saved_tb;
749 if (env->interrupt_request && env->current_tb)
750 cpu_interrupt(env, env->interrupt_request);
751 }
9fa3e853
FB
752 }
753 tb = tb_next;
754 }
755#if !defined(CONFIG_USER_ONLY)
756 /* if no code remaining, no need to continue to use slow writes */
757 if (!p->first_tb) {
758 invalidate_page_bitmap(p);
d720b93d
FB
759 if (is_cpu_write_access) {
760 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
761 }
762 }
763#endif
764#ifdef TARGET_HAS_PRECISE_SMC
765 if (current_tb_modified) {
766 /* we generate a block containing just the instruction
767 modifying the memory. It will ensure that it cannot modify
768 itself */
ea1c1802 769 env->current_tb = NULL;
5fafdf24 770 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
771 CF_SINGLE_INSN);
772 cpu_resume_from_signal(env, NULL);
9fa3e853 773 }
fd6ce8f6 774#endif
9fa3e853 775}
fd6ce8f6 776
9fa3e853 777/* len must be <= 8 and start must be a multiple of len */
d720b93d 778static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
779{
780 PageDesc *p;
781 int offset, b;
59817ccb 782#if 0
a4193c8a
FB
783 if (1) {
784 if (loglevel) {
5fafdf24
TS
785 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
786 cpu_single_env->mem_write_vaddr, len,
787 cpu_single_env->eip,
a4193c8a
FB
788 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
789 }
59817ccb
FB
790 }
791#endif
9fa3e853 792 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 793 if (!p)
9fa3e853
FB
794 return;
795 if (p->code_bitmap) {
796 offset = start & ~TARGET_PAGE_MASK;
797 b = p->code_bitmap[offset >> 3] >> (offset & 7);
798 if (b & ((1 << len) - 1))
799 goto do_invalidate;
800 } else {
801 do_invalidate:
d720b93d 802 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
803 }
804}
805
9fa3e853 806#if !defined(CONFIG_SOFTMMU)
5fafdf24 807static void tb_invalidate_phys_page(target_ulong addr,
d720b93d 808 unsigned long pc, void *puc)
9fa3e853 809{
d720b93d
FB
810 int n, current_flags, current_tb_modified;
811 target_ulong current_pc, current_cs_base;
9fa3e853 812 PageDesc *p;
d720b93d
FB
813 TranslationBlock *tb, *current_tb;
814#ifdef TARGET_HAS_PRECISE_SMC
815 CPUState *env = cpu_single_env;
816#endif
9fa3e853
FB
817
818 addr &= TARGET_PAGE_MASK;
819 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 820 if (!p)
9fa3e853
FB
821 return;
822 tb = p->first_tb;
d720b93d
FB
823 current_tb_modified = 0;
824 current_tb = NULL;
825 current_pc = 0; /* avoid warning */
826 current_cs_base = 0; /* avoid warning */
827 current_flags = 0; /* avoid warning */
828#ifdef TARGET_HAS_PRECISE_SMC
829 if (tb && pc != 0) {
830 current_tb = tb_find_pc(pc);
831 }
832#endif
9fa3e853
FB
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
836#ifdef TARGET_HAS_PRECISE_SMC
837 if (current_tb == tb &&
838 !(current_tb->cflags & CF_SINGLE_INSN)) {
839 /* If we are modifying the current TB, we must stop
840 its execution. We could be more precise by checking
841 that the modification is after the current PC, but it
842 would require a specialized function to partially
843 restore the CPU state */
3b46e624 844
d720b93d
FB
845 current_tb_modified = 1;
846 cpu_restore_state(current_tb, env, pc, puc);
847#if defined(TARGET_I386)
848 current_flags = env->hflags;
849 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
850 current_cs_base = (target_ulong)env->segs[R_CS].base;
851 current_pc = current_cs_base + env->eip;
852#else
853#error unsupported CPU
854#endif
855 }
856#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
857 tb_phys_invalidate(tb, addr);
858 tb = tb->page_next[n];
859 }
fd6ce8f6 860 p->first_tb = NULL;
d720b93d
FB
861#ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb_modified) {
863 /* we generate a block containing just the instruction
864 modifying the memory. It will ensure that it cannot modify
865 itself */
ea1c1802 866 env->current_tb = NULL;
5fafdf24 867 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
868 CF_SINGLE_INSN);
869 cpu_resume_from_signal(env, puc);
870 }
871#endif
fd6ce8f6 872}
9fa3e853 873#endif
fd6ce8f6
FB
874
875/* add the tb in the target page and protect it if necessary */
5fafdf24 876static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 877 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
878{
879 PageDesc *p;
9fa3e853
FB
880 TranslationBlock *last_first_tb;
881
882 tb->page_addr[n] = page_addr;
3a7d929e 883 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
884 tb->page_next[n] = p->first_tb;
885 last_first_tb = p->first_tb;
886 p->first_tb = (TranslationBlock *)((long)tb | n);
887 invalidate_page_bitmap(p);
fd6ce8f6 888
107db443 889#if defined(TARGET_HAS_SMC) || 1
d720b93d 890
9fa3e853 891#if defined(CONFIG_USER_ONLY)
fd6ce8f6 892 if (p->flags & PAGE_WRITE) {
53a5960a
PB
893 target_ulong addr;
894 PageDesc *p2;
9fa3e853
FB
895 int prot;
896
fd6ce8f6
FB
897 /* force the host page as non writable (writes will have a
898 page fault + mprotect overhead) */
53a5960a 899 page_addr &= qemu_host_page_mask;
fd6ce8f6 900 prot = 0;
53a5960a
PB
901 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
902 addr += TARGET_PAGE_SIZE) {
903
904 p2 = page_find (addr >> TARGET_PAGE_BITS);
905 if (!p2)
906 continue;
907 prot |= p2->flags;
908 p2->flags &= ~PAGE_WRITE;
909 page_get_flags(addr);
910 }
5fafdf24 911 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
912 (prot & PAGE_BITS) & ~PAGE_WRITE);
913#ifdef DEBUG_TB_INVALIDATE
ab3d1727 914 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 915 page_addr);
fd6ce8f6 916#endif
fd6ce8f6 917 }
9fa3e853
FB
918#else
919 /* if some code is already present, then the pages are already
920 protected. So we handle the case where only the first TB is
921 allocated in a physical page */
922 if (!last_first_tb) {
6a00d601 923 tlb_protect_code(page_addr);
9fa3e853
FB
924 }
925#endif
d720b93d
FB
926
927#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
928}
929
930/* Allocate a new translation block. Flush the translation buffer if
931 too many translation blocks or too much generated code. */
c27004ec 932TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
933{
934 TranslationBlock *tb;
fd6ce8f6 935
5fafdf24 936 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
fd6ce8f6 937 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 938 return NULL;
fd6ce8f6
FB
939 tb = &tbs[nb_tbs++];
940 tb->pc = pc;
b448f2f3 941 tb->cflags = 0;
d4e8164f
FB
942 return tb;
943}
944
9fa3e853
FB
945/* add a new TB and link it to the physical page tables. phys_page2 is
946 (-1) to indicate that only one page contains the TB. */
5fafdf24 947void tb_link_phys(TranslationBlock *tb,
9fa3e853 948 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 949{
9fa3e853
FB
950 unsigned int h;
951 TranslationBlock **ptb;
952
953 /* add in the physical hash table */
954 h = tb_phys_hash_func(phys_pc);
955 ptb = &tb_phys_hash[h];
956 tb->phys_hash_next = *ptb;
957 *ptb = tb;
fd6ce8f6
FB
958
959 /* add in the page list */
9fa3e853
FB
960 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
961 if (phys_page2 != -1)
962 tb_alloc_page(tb, 1, phys_page2);
963 else
964 tb->page_addr[1] = -1;
9fa3e853 965
d4e8164f
FB
966 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
967 tb->jmp_next[0] = NULL;
968 tb->jmp_next[1] = NULL;
969
970 /* init original jump addresses */
971 if (tb->tb_next_offset[0] != 0xffff)
972 tb_reset_jump(tb, 0);
973 if (tb->tb_next_offset[1] != 0xffff)
974 tb_reset_jump(tb, 1);
8a40a180
FB
975
976#ifdef DEBUG_TB_CHECK
977 tb_page_check();
978#endif
fd6ce8f6
FB
979}
980
9fa3e853
FB
981/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
982 tb[1].tc_ptr. Return NULL if not found */
983TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 984{
9fa3e853
FB
985 int m_min, m_max, m;
986 unsigned long v;
987 TranslationBlock *tb;
a513fe19
FB
988
989 if (nb_tbs <= 0)
990 return NULL;
991 if (tc_ptr < (unsigned long)code_gen_buffer ||
992 tc_ptr >= (unsigned long)code_gen_ptr)
993 return NULL;
994 /* binary search (cf Knuth) */
995 m_min = 0;
996 m_max = nb_tbs - 1;
997 while (m_min <= m_max) {
998 m = (m_min + m_max) >> 1;
999 tb = &tbs[m];
1000 v = (unsigned long)tb->tc_ptr;
1001 if (v == tc_ptr)
1002 return tb;
1003 else if (tc_ptr < v) {
1004 m_max = m - 1;
1005 } else {
1006 m_min = m + 1;
1007 }
5fafdf24 1008 }
a513fe19
FB
1009 return &tbs[m_max];
1010}
7501267e 1011
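/* The binary search in tb_find_pc() is valid because TBs are allocated
 * sequentially from tbs[] and their code is emitted linearly into
 * code_gen_buffer, so tc_ptr increases monotonically with the array
 * index. */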
ea041c0e
FB
1012static void tb_reset_jump_recursive(TranslationBlock *tb);
1013
1014static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1015{
1016 TranslationBlock *tb1, *tb_next, **ptb;
1017 unsigned int n1;
1018
1019 tb1 = tb->jmp_next[n];
1020 if (tb1 != NULL) {
1021 /* find head of list */
1022 for(;;) {
1023 n1 = (long)tb1 & 3;
1024 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1025 if (n1 == 2)
1026 break;
1027 tb1 = tb1->jmp_next[n1];
1028 }
1029 /* we are now sure that tb jumps to tb1 */
1030 tb_next = tb1;
1031
1032 /* remove tb from the jmp_first list */
1033 ptb = &tb_next->jmp_first;
1034 for(;;) {
1035 tb1 = *ptb;
1036 n1 = (long)tb1 & 3;
1037 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1038 if (n1 == n && tb1 == tb)
1039 break;
1040 ptb = &tb1->jmp_next[n1];
1041 }
1042 *ptb = tb->jmp_next[n];
1043 tb->jmp_next[n] = NULL;
3b46e624 1044
ea041c0e
FB
1045 /* suppress the jump to next tb in generated code */
1046 tb_reset_jump(tb, n);
1047
0124311e 1048 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1049 tb_reset_jump_recursive(tb_next);
1050 }
1051}
1052
1053static void tb_reset_jump_recursive(TranslationBlock *tb)
1054{
1055 tb_reset_jump_recursive2(tb, 0);
1056 tb_reset_jump_recursive2(tb, 1);
1057}
1058
1fddef4b 1059#if defined(TARGET_HAS_ICE)
d720b93d
FB
1060static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1061{
9b3c35e0
JM
1062 target_phys_addr_t addr;
1063 target_ulong pd;
c2f07f81
PB
1064 ram_addr_t ram_addr;
1065 PhysPageDesc *p;
d720b93d 1066
c2f07f81
PB
1067 addr = cpu_get_phys_page_debug(env, pc);
1068 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1069 if (!p) {
1070 pd = IO_MEM_UNASSIGNED;
1071 } else {
1072 pd = p->phys_offset;
1073 }
1074 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1075 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1076}
c27004ec 1077#endif
d720b93d 1078
6658ffb8
PB
1079/* Add a watchpoint. */
1080int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1081{
1082 int i;
1083
1084 for (i = 0; i < env->nb_watchpoints; i++) {
1085 if (addr == env->watchpoint[i].vaddr)
1086 return 0;
1087 }
1088 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1089 return -1;
1090
1091 i = env->nb_watchpoints++;
1092 env->watchpoint[i].vaddr = addr;
1093 tlb_flush_page(env, addr);
1094 /* FIXME: This flush is needed because of the hack to make memory ops
1095 terminate the TB. It can be removed once the proper IO trap and
1096 re-execute bits are in. */
1097 tb_flush(env);
1098 return i;
1099}
1100
1101/* Remove a watchpoint. */
1102int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1103{
1104 int i;
1105
1106 for (i = 0; i < env->nb_watchpoints; i++) {
1107 if (addr == env->watchpoint[i].vaddr) {
1108 env->nb_watchpoints--;
1109 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1110 tlb_flush_page(env, addr);
1111 return 0;
1112 }
1113 }
1114 return -1;
1115}
1116
c33a346e
FB
1117/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1118 breakpoint is reached */
2e12669a 1119int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1120{
1fddef4b 1121#if defined(TARGET_HAS_ICE)
4c3a88a2 1122 int i;
3b46e624 1123
4c3a88a2
FB
1124 for(i = 0; i < env->nb_breakpoints; i++) {
1125 if (env->breakpoints[i] == pc)
1126 return 0;
1127 }
1128
1129 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1130 return -1;
1131 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1132
d720b93d 1133 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1134 return 0;
1135#else
1136 return -1;
1137#endif
1138}
1139
1140/* remove a breakpoint */
2e12669a 1141int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1142{
1fddef4b 1143#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1144 int i;
1145 for(i = 0; i < env->nb_breakpoints; i++) {
1146 if (env->breakpoints[i] == pc)
1147 goto found;
1148 }
1149 return -1;
1150 found:
4c3a88a2 1151 env->nb_breakpoints--;
1fddef4b
FB
1152 if (i < env->nb_breakpoints)
1153 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1154
1155 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1156 return 0;
1157#else
1158 return -1;
1159#endif
1160}
1161
c33a346e
FB
1162/* enable or disable single step mode. EXCP_DEBUG is returned by the
1163 CPU loop after each instruction */
1164void cpu_single_step(CPUState *env, int enabled)
1165{
1fddef4b 1166#if defined(TARGET_HAS_ICE)
c33a346e
FB
1167 if (env->singlestep_enabled != enabled) {
1168 env->singlestep_enabled = enabled;
1169 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1170 /* XXX: only flush what is necessary */
0124311e 1171 tb_flush(env);
c33a346e
FB
1172 }
1173#endif
1174}
1175
34865134
FB
1176/* enable or disable low level logging */
1177void cpu_set_log(int log_flags)
1178{
1179 loglevel = log_flags;
1180 if (loglevel && !logfile) {
11fcfab4 1181 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1182 if (!logfile) {
1183 perror(logfilename);
1184 _exit(1);
1185 }
9fa3e853
FB
1186#if !defined(CONFIG_SOFTMMU)
1187 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1188 {
1189 static uint8_t logfile_buf[4096];
1190 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1191 }
1192#else
34865134 1193 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1194#endif
e735b91c
PB
1195 log_append = 1;
1196 }
1197 if (!loglevel && logfile) {
1198 fclose(logfile);
1199 logfile = NULL;
34865134
FB
1200 }
1201}
1202
1203void cpu_set_log_filename(const char *filename)
1204{
1205 logfilename = strdup(filename);
e735b91c
PB
1206 if (logfile) {
1207 fclose(logfile);
1208 logfile = NULL;
1209 }
1210 cpu_set_log(loglevel);
34865134 1211}
c33a346e 1212
0124311e 1213/* mask must never be zero, except for A20 change call */
68a79315 1214void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1215{
1216 TranslationBlock *tb;
ee8b7021 1217 static int interrupt_lock;
59817ccb 1218
68a79315 1219 env->interrupt_request |= mask;
ea041c0e
FB
1220 /* if the cpu is currently executing code, we must unlink it and
1221 all the potentially executing TB */
1222 tb = env->current_tb;
ee8b7021
FB
1223 if (tb && !testandset(&interrupt_lock)) {
1224 env->current_tb = NULL;
ea041c0e 1225 tb_reset_jump_recursive(tb);
ee8b7021 1226 interrupt_lock = 0;
ea041c0e
FB
1227 }
1228}
1229
b54ad049
FB
1230void cpu_reset_interrupt(CPUState *env, int mask)
1231{
1232 env->interrupt_request &= ~mask;
1233}
1234
f193c797 1235CPULogItem cpu_log_items[] = {
5fafdf24 1236 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1237 "show generated host assembly code for each compiled TB" },
1238 { CPU_LOG_TB_IN_ASM, "in_asm",
1239 "show target assembly code for each compiled TB" },
5fafdf24 1240 { CPU_LOG_TB_OP, "op",
f193c797
FB
1241 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1242#ifdef TARGET_I386
1243 { CPU_LOG_TB_OP_OPT, "op_opt",
1244 "show micro ops after optimization for each compiled TB" },
1245#endif
1246 { CPU_LOG_INT, "int",
1247 "show interrupts/exceptions in short format" },
1248 { CPU_LOG_EXEC, "exec",
1249 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1250 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1251 "show CPU state before block translation" },
f193c797
FB
1252#ifdef TARGET_I386
1253 { CPU_LOG_PCALL, "pcall",
1254 "show protected mode far calls/returns/exceptions" },
1255#endif
8e3a9fd2 1256#ifdef DEBUG_IOPORT
fd872598
FB
1257 { CPU_LOG_IOPORT, "ioport",
1258 "show all i/o ports accesses" },
8e3a9fd2 1259#endif
f193c797
FB
1260 { 0, NULL, NULL },
1261};
1262
1263static int cmp1(const char *s1, int n, const char *s2)
1264{
1265 if (strlen(s2) != n)
1266 return 0;
1267 return memcmp(s1, s2, n) == 0;
1268}
3b46e624 1269
f193c797
FB
1270/* takes a comma separated list of log masks. Return 0 if error. */
1271int cpu_str_to_log_mask(const char *str)
1272{
1273 CPULogItem *item;
1274 int mask;
1275 const char *p, *p1;
1276
1277 p = str;
1278 mask = 0;
1279 for(;;) {
1280 p1 = strchr(p, ',');
1281 if (!p1)
1282 p1 = p + strlen(p);
8e3a9fd2
FB
1283 if(cmp1(p,p1-p,"all")) {
1284 for(item = cpu_log_items; item->mask != 0; item++) {
1285 mask |= item->mask;
1286 }
1287 } else {
f193c797
FB
1288 for(item = cpu_log_items; item->mask != 0; item++) {
1289 if (cmp1(p, p1 - p, item->name))
1290 goto found;
1291 }
1292 return 0;
8e3a9fd2 1293 }
f193c797
FB
1294 found:
1295 mask |= item->mask;
1296 if (*p1 != ',')
1297 break;
1298 p = p1 + 1;
1299 }
1300 return mask;
1301}
ea041c0e 1302
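/* Example: cpu_str_to_log_mask("in_asm,op") returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" returns the union of every
 * entry in cpu_log_items[], and any unknown name makes the whole call
 * return 0. */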
7501267e
FB
1303void cpu_abort(CPUState *env, const char *fmt, ...)
1304{
1305 va_list ap;
493ae1f0 1306 va_list ap2;
7501267e
FB
1307
1308 va_start(ap, fmt);
493ae1f0 1309 va_copy(ap2, ap);
7501267e
FB
1310 fprintf(stderr, "qemu: fatal: ");
1311 vfprintf(stderr, fmt, ap);
1312 fprintf(stderr, "\n");
1313#ifdef TARGET_I386
0573fbfc
TS
1314 if(env->intercept & INTERCEPT_SVM_MASK) {
1315 /* most probably the virtual machine should not
1316 be shut down but rather caught by the VMM */
1317 vmexit(SVM_EXIT_SHUTDOWN, 0);
1318 }
7fe48483
FB
1319 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1320#else
1321 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1322#endif
924edcae 1323 if (logfile) {
f9373291 1324 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1325 vfprintf(logfile, fmt, ap2);
f9373291
JM
1326 fprintf(logfile, "\n");
1327#ifdef TARGET_I386
1328 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1329#else
1330 cpu_dump_state(env, logfile, fprintf, 0);
1331#endif
924edcae
AZ
1332 fflush(logfile);
1333 fclose(logfile);
1334 }
493ae1f0 1335 va_end(ap2);
f9373291 1336 va_end(ap);
7501267e
FB
1337 abort();
1338}
1339
c5be9f08
TS
1340CPUState *cpu_copy(CPUState *env)
1341{
01ba9816 1342 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1343 /* preserve chaining and index */
1344 CPUState *next_cpu = new_env->next_cpu;
1345 int cpu_index = new_env->cpu_index;
1346 memcpy(new_env, env, sizeof(CPUState));
1347 new_env->next_cpu = next_cpu;
1348 new_env->cpu_index = cpu_index;
1349 return new_env;
1350}
1351
0124311e
FB
1352#if !defined(CONFIG_USER_ONLY)
1353
ee8b7021
FB
1354/* NOTE: if flush_global is true, also flush global entries (not
1355 implemented yet) */
1356void tlb_flush(CPUState *env, int flush_global)
33417e70 1357{
33417e70 1358 int i;
0124311e 1359
9fa3e853
FB
1360#if defined(DEBUG_TLB)
1361 printf("tlb_flush:\n");
1362#endif
0124311e
FB
1363 /* must reset current TB so that interrupts cannot modify the
1364 links while we are modifying them */
1365 env->current_tb = NULL;
1366
33417e70 1367 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1368 env->tlb_table[0][i].addr_read = -1;
1369 env->tlb_table[0][i].addr_write = -1;
1370 env->tlb_table[0][i].addr_code = -1;
1371 env->tlb_table[1][i].addr_read = -1;
1372 env->tlb_table[1][i].addr_write = -1;
1373 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1374#if (NB_MMU_MODES >= 3)
1375 env->tlb_table[2][i].addr_read = -1;
1376 env->tlb_table[2][i].addr_write = -1;
1377 env->tlb_table[2][i].addr_code = -1;
1378#if (NB_MMU_MODES == 4)
1379 env->tlb_table[3][i].addr_read = -1;
1380 env->tlb_table[3][i].addr_write = -1;
1381 env->tlb_table[3][i].addr_code = -1;
1382#endif
1383#endif
33417e70 1384 }
9fa3e853 1385
8a40a180 1386 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1387
1388#if !defined(CONFIG_SOFTMMU)
1389 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1390#endif
1391#ifdef USE_KQEMU
1392 if (env->kqemu_enabled) {
1393 kqemu_flush(env, flush_global);
1394 }
9fa3e853 1395#endif
e3db7226 1396 tlb_flush_count++;
33417e70
FB
1397}
1398
274da6b2 1399static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1400{
5fafdf24 1401 if (addr == (tlb_entry->addr_read &
84b7b8e7 1402 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1403 addr == (tlb_entry->addr_write &
84b7b8e7 1404 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1405 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1406 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1407 tlb_entry->addr_read = -1;
1408 tlb_entry->addr_write = -1;
1409 tlb_entry->addr_code = -1;
1410 }
61382a50
FB
1411}
1412
2e12669a 1413void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1414{
8a40a180 1415 int i;
9fa3e853 1416 TranslationBlock *tb;
0124311e 1417
9fa3e853 1418#if defined(DEBUG_TLB)
108c49b8 1419 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1420#endif
0124311e
FB
1421 /* must reset current TB so that interrupts cannot modify the
1422 links while we are modifying them */
1423 env->current_tb = NULL;
61382a50
FB
1424
1425 addr &= TARGET_PAGE_MASK;
1426 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1427 tlb_flush_entry(&env->tlb_table[0][i], addr);
1428 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1429#if (NB_MMU_MODES >= 3)
1430 tlb_flush_entry(&env->tlb_table[2][i], addr);
1431#if (NB_MMU_MODES == 4)
1432 tlb_flush_entry(&env->tlb_table[3][i], addr);
1433#endif
1434#endif
0124311e 1435
b362e5e0
PB
1436 /* Discard jump cache entries for any tb which might potentially
1437 overlap the flushed page. */
1438 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1439 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1440
1441 i = tb_jmp_cache_hash_page(addr);
1442 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1443
0124311e 1444#if !defined(CONFIG_SOFTMMU)
9fa3e853 1445 if (addr < MMAP_AREA_END)
0124311e 1446 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1447#endif
0a962c02
FB
1448#ifdef USE_KQEMU
1449 if (env->kqemu_enabled) {
1450 kqemu_flush_page(env, addr);
1451 }
1452#endif
9fa3e853
FB
1453}
1454
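/* tlb_flush_page() only touches the entries that can map 'addr': one
 * slot per MMU mode, selected by
 * (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), plus the two
 * tb_jmp_cache hash pages that may contain TBs overlapping the page. */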
9fa3e853
FB
1455/* update the TLBs so that writes to code in the virtual page 'addr'
1456 can be detected */
6a00d601 1457static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1458{
5fafdf24 1459 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1460 ram_addr + TARGET_PAGE_SIZE,
1461 CODE_DIRTY_FLAG);
9fa3e853
FB
1462}
1463
9fa3e853 1464/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1465 tested for self modifying code */
5fafdf24 1466static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1467 target_ulong vaddr)
9fa3e853 1468{
3a7d929e 1469 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1470}
1471
5fafdf24 1472static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1473 unsigned long start, unsigned long length)
1474{
1475 unsigned long addr;
84b7b8e7
FB
1476 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1477 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1478 if ((addr - start) < length) {
84b7b8e7 1479 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1480 }
1481 }
1482}
1483
3a7d929e 1484void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1485 int dirty_flags)
1ccde1cb
FB
1486{
1487 CPUState *env;
4f2ac237 1488 unsigned long length, start1;
0a962c02
FB
1489 int i, mask, len;
1490 uint8_t *p;
1ccde1cb
FB
1491
1492 start &= TARGET_PAGE_MASK;
1493 end = TARGET_PAGE_ALIGN(end);
1494
1495 length = end - start;
1496 if (length == 0)
1497 return;
0a962c02 1498 len = length >> TARGET_PAGE_BITS;
3a7d929e 1499#ifdef USE_KQEMU
6a00d601
FB
1500 /* XXX: should not depend on cpu context */
1501 env = first_cpu;
3a7d929e 1502 if (env->kqemu_enabled) {
f23db169
FB
1503 ram_addr_t addr;
1504 addr = start;
1505 for(i = 0; i < len; i++) {
1506 kqemu_set_notdirty(env, addr);
1507 addr += TARGET_PAGE_SIZE;
1508 }
3a7d929e
FB
1509 }
1510#endif
f23db169
FB
1511 mask = ~dirty_flags;
1512 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1513 for(i = 0; i < len; i++)
1514 p[i] &= mask;
1515
1ccde1cb
FB
1516 /* we modify the TLB cache so that the dirty bit will be set again
1517 when accessing the range */
59817ccb 1518 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1519 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1520 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1521 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1522 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1523 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1524#if (NB_MMU_MODES >= 3)
1525 for(i = 0; i < CPU_TLB_SIZE; i++)
1526 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1527#if (NB_MMU_MODES == 4)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1530#endif
1531#endif
6a00d601 1532 }
59817ccb
FB
1533
1534#if !defined(CONFIG_SOFTMMU)
1535 /* XXX: this is expensive */
1536 {
1537 VirtPageDesc *p;
1538 int j;
1539 target_ulong addr;
1540
1541 for(i = 0; i < L1_SIZE; i++) {
1542 p = l1_virt_map[i];
1543 if (p) {
1544 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1545 for(j = 0; j < L2_SIZE; j++) {
1546 if (p->valid_tag == virt_valid_tag &&
1547 p->phys_addr >= start && p->phys_addr < end &&
1548 (p->prot & PROT_WRITE)) {
1549 if (addr < MMAP_AREA_END) {
5fafdf24 1550 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1551 p->prot & ~PROT_WRITE);
1552 }
1553 }
1554 addr += TARGET_PAGE_SIZE;
1555 p++;
1556 }
1557 }
1558 }
1559 }
1560#endif
1ccde1cb
FB
1561}
1562
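/* phys_ram_dirty keeps one byte of dirty flags per target page, indexed
 * by ram_addr >> TARGET_PAGE_BITS. cpu_physical_memory_reset_dirty()
 * clears the requested flag bits (e.g. CODE_DIRTY_FLAG) and re-arms the
 * write TLB entries as IO_MEM_NOTDIRTY so that the next store to the
 * range takes the slow path and sets the flags again. */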
3a7d929e
FB
1563static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1564{
1565 ram_addr_t ram_addr;
1566
84b7b8e7 1567 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1568 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1569 tlb_entry->addend - (unsigned long)phys_ram_base;
1570 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1571 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1572 }
1573 }
1574}
1575
1576/* update the TLB according to the current state of the dirty bits */
1577void cpu_tlb_update_dirty(CPUState *env)
1578{
1579 int i;
1580 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1581 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1582 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1583 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1584#if (NB_MMU_MODES >= 3)
1585 for(i = 0; i < CPU_TLB_SIZE; i++)
1586 tlb_update_dirty(&env->tlb_table[2][i]);
1587#if (NB_MMU_MODES == 4)
1588 for(i = 0; i < CPU_TLB_SIZE; i++)
1589 tlb_update_dirty(&env->tlb_table[3][i]);
1590#endif
1591#endif
3a7d929e
FB
1592}
1593
5fafdf24 1594static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1595 unsigned long start)
1ccde1cb
FB
1596{
1597 unsigned long addr;
84b7b8e7
FB
1598 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1599 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1600 if (addr == start) {
84b7b8e7 1601 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1602 }
1603 }
1604}
1605
1606/* update the TLB corresponding to virtual page vaddr and phys addr
1607 addr so that it is no longer dirty */
6a00d601
FB
1608static inline void tlb_set_dirty(CPUState *env,
1609 unsigned long addr, target_ulong vaddr)
1ccde1cb 1610{
1ccde1cb
FB
1611 int i;
1612
1ccde1cb
FB
1613 addr &= TARGET_PAGE_MASK;
1614 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1615 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1616 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1617#if (NB_MMU_MODES >= 3)
1618 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1619#if (NB_MMU_MODES == 4)
1620 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1621#endif
1622#endif
9fa3e853
FB
1623}
1624
59817ccb
FB
1625/* add a new TLB entry. At most one entry for a given virtual address
1626 is permitted. Return 0 if OK or 2 if the page could not be mapped
1627 (can only happen in non SOFTMMU mode for I/O pages or pages
1628 conflicting with the host address space). */
5fafdf24
TS
1629int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1630 target_phys_addr_t paddr, int prot,
6ebbf390 1631 int mmu_idx, int is_softmmu)
9fa3e853 1632{
92e873b9 1633 PhysPageDesc *p;
4f2ac237 1634 unsigned long pd;
9fa3e853 1635 unsigned int index;
4f2ac237 1636 target_ulong address;
108c49b8 1637 target_phys_addr_t addend;
9fa3e853 1638 int ret;
84b7b8e7 1639 CPUTLBEntry *te;
6658ffb8 1640 int i;
9fa3e853 1641
92e873b9 1642 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1643 if (!p) {
1644 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1645 } else {
1646 pd = p->phys_offset;
9fa3e853
FB
1647 }
1648#if defined(DEBUG_TLB)
6ebbf390
JM
1649 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1650 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1651#endif
1652
1653 ret = 0;
1654#if !defined(CONFIG_SOFTMMU)
5fafdf24 1655 if (is_softmmu)
9fa3e853
FB
1656#endif
1657 {
2a4188a3 1658 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1659 /* IO memory case */
1660 address = vaddr | pd;
1661 addend = paddr;
1662 } else {
1663 /* standard memory */
1664 address = vaddr;
1665 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1666 }
6658ffb8
PB
1667
1668 /* Make accesses to pages with watchpoints go via the
1669 watchpoint trap routines. */
1670 for (i = 0; i < env->nb_watchpoints; i++) {
1671 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1672 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1673 env->watchpoint[i].addend = 0;
6658ffb8
PB
1674 address = vaddr | io_mem_watch;
1675 } else {
d79acba4
AZ
1676 env->watchpoint[i].addend = pd - paddr +
1677 (unsigned long) phys_ram_base;
6658ffb8
PB
1678 /* TODO: Figure out how to make read watchpoints coexist
1679 with code. */
1680 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1681 }
1682 }
1683 }
d79acba4 1684
90f18422 1685 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1686 addend -= vaddr;
6ebbf390 1687 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1688 te->addend = addend;
67b915a5 1689 if (prot & PAGE_READ) {
84b7b8e7
FB
1690 te->addr_read = address;
1691 } else {
1692 te->addr_read = -1;
1693 }
1694 if (prot & PAGE_EXEC) {
1695 te->addr_code = address;
9fa3e853 1696 } else {
84b7b8e7 1697 te->addr_code = -1;
9fa3e853 1698 }
67b915a5 1699 if (prot & PAGE_WRITE) {
5fafdf24 1700 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1701 (pd & IO_MEM_ROMD)) {
1702 /* write access calls the I/O callback */
5fafdf24 1703 te->addr_write = vaddr |
856074ec 1704 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1705 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1706 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1707 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1708 } else {
84b7b8e7 1709 te->addr_write = address;
9fa3e853
FB
1710 }
1711 } else {
84b7b8e7 1712 te->addr_write = -1;
9fa3e853
FB
1713 }
1714 }
1715#if !defined(CONFIG_SOFTMMU)
1716 else {
1717 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1718 /* IO access: no mapping is done as it will be handled by the
1719 soft MMU */
1720 if (!(env->hflags & HF_SOFTMMU_MASK))
1721 ret = 2;
1722 } else {
1723 void *map_addr;
59817ccb
FB
1724
1725 if (vaddr >= MMAP_AREA_END) {
1726 ret = 2;
1727 } else {
1728 if (prot & PROT_WRITE) {
5fafdf24 1729 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1730#if defined(TARGET_HAS_SMC) || 1
59817ccb 1731 first_tb ||
d720b93d 1732#endif
5fafdf24 1733 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1734 !cpu_physical_memory_is_dirty(pd))) {
1735 /* ROM: we do as if code was inside */
1736 /* if code is present, we only map as read only and save the
1737 original mapping */
1738 VirtPageDesc *vp;
3b46e624 1739
90f18422 1740 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1741 vp->phys_addr = pd;
1742 vp->prot = prot;
1743 vp->valid_tag = virt_valid_tag;
1744 prot &= ~PAGE_WRITE;
1745 }
1746 }
5fafdf24 1747 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1748 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1749 if (map_addr == MAP_FAILED) {
1750 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1751 paddr, vaddr);
9fa3e853 1752 }
9fa3e853
FB
1753 }
1754 }
1755 }
1756#endif
1757 return ret;
1758}
1759
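/* Summary of the encoding set up above: addr_read/addr_write/addr_code
 * hold the virtual page address with extra state in the low bits (an
 * io_mem index for I/O pages, IO_MEM_NOTDIRTY for clean RAM on the
 * write path), and addend converts a guest virtual address into a host
 * pointer for the fast path: host_addr = vaddr + addend. */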
1760/* called from signal handler: invalidate the code and unprotect the
1761 page. Return TRUE if the fault was successfully handled. */
53a5960a 1762int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1763{
1764#if !defined(CONFIG_SOFTMMU)
1765 VirtPageDesc *vp;
1766
1767#if defined(DEBUG_TLB)
1768 printf("page_unprotect: addr=0x%08x\n", addr);
1769#endif
1770 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1771
1772 /* if it is not mapped, no need to worry here */
1773 if (addr >= MMAP_AREA_END)
1774 return 0;
9fa3e853
FB
1775 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1776 if (!vp)
1777 return 0;
1778 /* NOTE: in this case, validate_tag is _not_ tested as it
1779 validates only the code TLB */
1780 if (vp->valid_tag != virt_valid_tag)
1781 return 0;
1782 if (!(vp->prot & PAGE_WRITE))
1783 return 0;
1784#if defined(DEBUG_TLB)
5fafdf24 1785 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1786 addr, vp->phys_addr, vp->prot);
1787#endif
59817ccb
FB
1788 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1789 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1790 (unsigned long)addr, vp->prot);
d720b93d 1791 /* set the dirty bit */
0a962c02 1792 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1793 /* flush the code inside */
1794 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1795 return 1;
1796#else
1797 return 0;
1798#endif
33417e70
FB
1799}
1800
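/* A minimal, self-contained sketch (not the QEMU code above) of the
 * unprotect-on-fault idea used by page_unprotect(): a page is kept read-only
 * while translated code derived from it exists; when a write fault arrives,
 * the handler restores PROT_READ|PROT_WRITE, records the page as dirty, and
 * the caller would then invalidate the cached translations.  Names such as
 * dirty_bitmap and demo_page_unprotect are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static uint8_t dirty_bitmap[1];          /* one byte of dirty flags per page */

static int demo_page_unprotect(void *page_start, size_t page_size, unsigned page_index)
{
    /* give write access back so the faulting store can be restarted */
    if (mprotect(page_start, page_size, PROT_READ | PROT_WRITE) < 0)
        return 0;
    /* remember that the page content is no longer in sync with any
       translation made from it */
    dirty_bitmap[page_index] = 0xff;
    /* a real implementation would invalidate translated code here */
    return 1;
}

int main(void)
{
    size_t psz = (size_t)sysconf(_SC_PAGESIZE);
    uint8_t *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    mprotect(p, psz, PROT_READ);          /* simulate the write-protected state */
    if (demo_page_unprotect(p, psz, 0))
        p[0] = 0x42;                      /* the store now succeeds */
    printf("dirty=%02x byte=%02x\n", dirty_bitmap[0], p[0]);
    munmap(p, psz);
    return 0;
}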
0124311e
FB
1801#else
1802
ee8b7021 1803void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1804{
1805}
1806
2e12669a 1807void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1808{
1809}
1810
5fafdf24
TS
1811int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1812 target_phys_addr_t paddr, int prot,
6ebbf390 1813 int mmu_idx, int is_softmmu)
9fa3e853
FB
1814{
1815 return 0;
1816}
0124311e 1817
9fa3e853
FB
1818/* dump memory mappings */
1819void page_dump(FILE *f)
33417e70 1820{
9fa3e853
FB
1821 unsigned long start, end;
1822 int i, j, prot, prot1;
1823 PageDesc *p;
33417e70 1824
9fa3e853
FB
1825 fprintf(f, "%-8s %-8s %-8s %s\n",
1826 "start", "end", "size", "prot");
1827 start = -1;
1828 end = -1;
1829 prot = 0;
1830 for(i = 0; i <= L1_SIZE; i++) {
1831 if (i < L1_SIZE)
1832 p = l1_map[i];
1833 else
1834 p = NULL;
1835 for(j = 0;j < L2_SIZE; j++) {
1836 if (!p)
1837 prot1 = 0;
1838 else
1839 prot1 = p[j].flags;
1840 if (prot1 != prot) {
1841 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1842 if (start != -1) {
1843 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1844 start, end, end - start,
9fa3e853
FB
1845 prot & PAGE_READ ? 'r' : '-',
1846 prot & PAGE_WRITE ? 'w' : '-',
1847 prot & PAGE_EXEC ? 'x' : '-');
1848 }
1849 if (prot1 != 0)
1850 start = end;
1851 else
1852 start = -1;
1853 prot = prot1;
1854 }
1855 if (!p)
1856 break;
1857 }
33417e70 1858 }
33417e70
FB
1859}
1860
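/* Sketch of the range-coalescing loop used by page_dump() above: walk the
 * per-page protection flags and emit one line per run of identical flags.
 * The flat flags[] array and DEMO_* constants below are illustrative
 * stand-ins for the two-level l1_map/PageDesc structure. */
#include <stdio.h>

#define DEMO_PAGE_BITS 12
#define DEMO_READ  1
#define DEMO_WRITE 2
#define DEMO_EXEC  4

static void demo_page_dump(const int *flags, unsigned nb_pages)
{
    unsigned long start = (unsigned long)-1, end;
    int prot = 0;
    unsigned i;

    printf("%-10s %-10s %-10s %s\n", "start", "end", "size", "prot");
    for (i = 0; i <= nb_pages; i++) {
        int prot1 = (i < nb_pages) ? flags[i] : 0;   /* 0 terminates the last run */
        if (prot1 != prot) {
            end = (unsigned long)i << DEMO_PAGE_BITS;
            if (start != (unsigned long)-1) {
                printf("%08lx-%08lx %08lx %c%c%c\n", start, end, end - start,
                       prot & DEMO_READ  ? 'r' : '-',
                       prot & DEMO_WRITE ? 'w' : '-',
                       prot & DEMO_EXEC  ? 'x' : '-');
            }
            start = prot1 ? end : (unsigned long)-1;
            prot = prot1;
        }
    }
}

int main(void)
{
    int flags[8] = { 5, 5, 5, 0, 3, 3, 0, 7 };   /* r-x r-x r-x --- rw- rw- --- rwx */
    demo_page_dump(flags, 8);
    return 0;
}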
53a5960a 1861int page_get_flags(target_ulong address)
33417e70 1862{
9fa3e853
FB
1863 PageDesc *p;
1864
1865 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1866 if (!p)
9fa3e853
FB
1867 return 0;
1868 return p->flags;
1869}
1870
1871/* modify the flags of a page and invalidate the code if
1872    necessary. The flag PAGE_WRITE_ORG is set automatically
1873 depending on PAGE_WRITE */
53a5960a 1874void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1875{
1876 PageDesc *p;
53a5960a 1877 target_ulong addr;
9fa3e853
FB
1878
1879 start = start & TARGET_PAGE_MASK;
1880 end = TARGET_PAGE_ALIGN(end);
1881 if (flags & PAGE_WRITE)
1882 flags |= PAGE_WRITE_ORG;
1883 spin_lock(&tb_lock);
1884 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1885 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1886 /* if the write protection is set, then we invalidate the code
1887 inside */
5fafdf24 1888 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1889 (flags & PAGE_WRITE) &&
1890 p->first_tb) {
d720b93d 1891 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1892 }
1893 p->flags = flags;
1894 }
1895 spin_unlock(&tb_lock);
33417e70
FB
1896}
1897
3d97b40b
TS
1898int page_check_range(target_ulong start, target_ulong len, int flags)
1899{
1900 PageDesc *p;
1901 target_ulong end;
1902 target_ulong addr;
1903
1904     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1905 start = start & TARGET_PAGE_MASK;
1906
1907 if( end < start )
1908 /* we've wrapped around */
1909 return -1;
1910 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1911 p = page_find(addr >> TARGET_PAGE_BITS);
1912 if( !p )
1913 return -1;
1914 if( !(p->flags & PAGE_VALID) )
1915 return -1;
1916
dae3270c 1917 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1918 return -1;
dae3270c
FB
1919 if (flags & PAGE_WRITE) {
1920 if (!(p->flags & PAGE_WRITE_ORG))
1921 return -1;
1922 /* unprotect the page if it was put read-only because it
1923 contains translated code */
1924 if (!(p->flags & PAGE_WRITE)) {
1925 if (!page_unprotect(addr, 0, NULL))
1926 return -1;
1927 }
1928 return 0;
1929 }
3d97b40b
TS
1930 }
1931 return 0;
1932}
1933
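/* Minimal sketch of the range check above: align the range to page
 * boundaries, reject address wrap-around, then require the needed flags on
 * every page.  The flags array and constants are illustrative; the real
 * code additionally unprotects pages that were made read-only because they
 * contain translated code. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_BITS 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_BITS)
#define DEMO_PAGE_MASK  (~(uint32_t)(DEMO_PAGE_SIZE - 1))
#define DEMO_VALID 1
#define DEMO_READ  2
#define DEMO_WRITE 4

static int demo_page_check_range(const int *flags, unsigned nb_pages,
                                 uint32_t start, uint32_t len, int required)
{
    uint32_t end, addr;

    end = (start + len + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK; /* align up first */
    start &= DEMO_PAGE_MASK;
    if (end < start)                       /* the range wrapped around */
        return -1;
    for (addr = start; addr < end; addr += DEMO_PAGE_SIZE) {
        unsigned idx = addr >> DEMO_PAGE_BITS;
        if (idx >= nb_pages || !(flags[idx] & DEMO_VALID))
            return -1;
        if ((required & ~flags[idx]) != 0) /* some required flag is missing */
            return -1;
    }
    return 0;
}

int main(void)
{
    int flags[4] = { DEMO_VALID | DEMO_READ,
                     DEMO_VALID | DEMO_READ | DEMO_WRITE,
                     DEMO_VALID | DEMO_READ,
                     0 };
    printf("%d\n", demo_page_check_range(flags, 4, 0x0000, 0x2800, DEMO_READ));  /* 0  */
    printf("%d\n", demo_page_check_range(flags, 4, 0x0000, 0x2800, DEMO_WRITE)); /* -1 */
    printf("%d\n", demo_page_check_range(flags, 4, 0x3000, 0x0100, DEMO_READ));  /* -1 */
    return 0;
}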
9fa3e853
FB
1934/* called from signal handler: invalidate the code and unprotect the
1935    page. Return TRUE if the fault was successfully handled. */
53a5960a 1936int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1937{
1938 unsigned int page_index, prot, pindex;
1939 PageDesc *p, *p1;
53a5960a 1940 target_ulong host_start, host_end, addr;
9fa3e853 1941
83fb7adf 1942 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1943 page_index = host_start >> TARGET_PAGE_BITS;
1944 p1 = page_find(page_index);
1945 if (!p1)
1946 return 0;
83fb7adf 1947 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1948 p = p1;
1949 prot = 0;
1950 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1951 prot |= p->flags;
1952 p++;
1953 }
1954 /* if the page was really writable, then we change its
1955 protection back to writable */
1956 if (prot & PAGE_WRITE_ORG) {
1957 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1958 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1959 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1960 (prot & PAGE_BITS) | PAGE_WRITE);
1961 p1[pindex].flags |= PAGE_WRITE;
1962 /* and since the content will be modified, we must invalidate
1963 the corresponding translated code. */
d720b93d 1964 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1965#ifdef DEBUG_TB_CHECK
1966 tb_invalidate_check(address);
1967#endif
1968 return 1;
1969 }
1970 }
1971 return 0;
1972}
1973
6a00d601
FB
1974static inline void tlb_set_dirty(CPUState *env,
1975 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1976{
1977}
9fa3e853
FB
1978#endif /* defined(CONFIG_USER_ONLY) */
1979
db7b5426
BS
1980static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1981 int memory);
1982static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1983 int orig_memory);
1984#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1985 need_subpage) \
1986 do { \
1987 if (addr > start_addr) \
1988 start_addr2 = 0; \
1989 else { \
1990 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1991 if (start_addr2 > 0) \
1992 need_subpage = 1; \
1993 } \
1994 \
49e9fba2 1995 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1996 end_addr2 = TARGET_PAGE_SIZE - 1; \
1997 else { \
1998 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1999 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2000 need_subpage = 1; \
2001 } \
2002 } while (0)
2003
33417e70
FB
2004/* register physical memory. 'size' must be a multiple of the target
2005 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2006 io memory page */
5fafdf24 2007void cpu_register_physical_memory(target_phys_addr_t start_addr,
2e12669a
FB
2008 unsigned long size,
2009 unsigned long phys_offset)
33417e70 2010{
108c49b8 2011 target_phys_addr_t addr, end_addr;
92e873b9 2012 PhysPageDesc *p;
9d42037b 2013 CPUState *env;
db7b5426
BS
2014 unsigned long orig_size = size;
2015 void *subpage;
33417e70 2016
5fd386f6 2017 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2018 end_addr = start_addr + (target_phys_addr_t)size;
2019 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2021 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2022 unsigned long orig_memory = p->phys_offset;
2023 target_phys_addr_t start_addr2, end_addr2;
2024 int need_subpage = 0;
2025
2026 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2027 need_subpage);
4254fab8 2028 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2029 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2030 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2031 &p->phys_offset, orig_memory);
2032 } else {
2033 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2034 >> IO_MEM_SHIFT];
2035 }
2036 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2037 } else {
2038 p->phys_offset = phys_offset;
2039 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2040 (phys_offset & IO_MEM_ROMD))
2041 phys_offset += TARGET_PAGE_SIZE;
2042 }
2043 } else {
2044 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2045 p->phys_offset = phys_offset;
2046 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2047 (phys_offset & IO_MEM_ROMD))
2048 phys_offset += TARGET_PAGE_SIZE;
2049 else {
2050 target_phys_addr_t start_addr2, end_addr2;
2051 int need_subpage = 0;
2052
2053 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2054 end_addr2, need_subpage);
2055
4254fab8 2056 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2057 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2058 &p->phys_offset, IO_MEM_UNASSIGNED);
2059 subpage_register(subpage, start_addr2, end_addr2,
2060 phys_offset);
2061 }
2062 }
2063 }
33417e70 2064 }
3b46e624 2065
9d42037b
FB
2066 /* since each CPU stores ram addresses in its TLB cache, we must
2067 reset the modified entries */
2068 /* XXX: slow ! */
2069 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2070 tlb_flush(env, 1);
2071 }
33417e70
FB
2072}
2073
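/* The CHECK_SUBPAGE macro above decides, for one target page addr, which
 * byte range of that page is covered by the registration [start_addr,
 * start_addr + size) and whether the coverage is only partial (so a subpage
 * io region is needed).  A sketch of the same computation as a function,
 * with illustrative 4 KiB pages: */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(uint64_t)(DEMO_PAGE_SIZE - 1))

static int demo_check_subpage(uint64_t addr, uint64_t start_addr, uint64_t size,
                              uint32_t *start2, uint32_t *end2)
{
    int need_subpage = 0;

    if (addr > start_addr) {
        *start2 = 0;                               /* region started in an earlier page */
    } else {
        *start2 = start_addr & ~DEMO_PAGE_MASK;    /* offset of the region inside this page */
        if (*start2 > 0)
            need_subpage = 1;
    }
    if ((start_addr + size) - addr >= DEMO_PAGE_SIZE) {
        *end2 = DEMO_PAGE_SIZE - 1;                /* region covers the rest of this page */
    } else {
        *end2 = (start_addr + size - 1) & ~DEMO_PAGE_MASK;
        if (*end2 < DEMO_PAGE_SIZE - 1)
            need_subpage = 1;
    }
    return need_subpage;
}

int main(void)
{
    uint32_t s, e;
    /* a 0x500-byte region starting at offset 0x100 of page 0x10000 */
    int sub = demo_check_subpage(0x10000, 0x10100, 0x500, &s, &e);
    printf("subpage=%d start2=0x%x end2=0x%x\n", sub, s, e); /* subpage=1 0x100 0x5ff */
    return 0;
}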
ba863458
FB
2074/* XXX: temporary until new memory mapping API */
2075uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2076{
2077 PhysPageDesc *p;
2078
2079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2080 if (!p)
2081 return IO_MEM_UNASSIGNED;
2082 return p->phys_offset;
2083}
2084
e9a1ab19
FB
2085/* XXX: better than nothing */
2086ram_addr_t qemu_ram_alloc(unsigned int size)
2087{
2088 ram_addr_t addr;
2089 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
5fafdf24 2090 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
e9a1ab19
FB
2091 size, phys_ram_size);
2092 abort();
2093 }
2094 addr = phys_ram_alloc_offset;
2095 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2096 return addr;
2097}
2098
2099void qemu_ram_free(ram_addr_t addr)
2100{
2101}
2102
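/* qemu_ram_alloc() above is a simple bump allocator over the fixed guest RAM
 * pool: hand out the current offset, then advance it rounded up to the page
 * size; freeing is a no-op.  A standalone sketch with illustrative names: */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_POOL_SIZE (64u * 1024)

static uint32_t demo_alloc_offset;

static int demo_ram_alloc(uint32_t size, uint32_t *addr)
{
    if (demo_alloc_offset + size > DEMO_POOL_SIZE)
        return -1;                                     /* pool exhausted */
    *addr = demo_alloc_offset;
    /* round the new offset up so every allocation starts page aligned */
    demo_alloc_offset = (demo_alloc_offset + size + DEMO_PAGE_SIZE - 1)
                        & ~(DEMO_PAGE_SIZE - 1);
    return 0;
}

int main(void)
{
    uint32_t a, b;
    demo_ram_alloc(100, &a);     /* a = 0x0000 */
    demo_ram_alloc(5000, &b);    /* b = 0x1000, the next page boundary */
    printf("a=0x%x b=0x%x next=0x%x\n", a, b, demo_alloc_offset);
    return 0;
}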
a4193c8a 2103static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2104{
67d3b957 2105#ifdef DEBUG_UNASSIGNED
ab3d1727 2106 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2107#endif
2108#ifdef TARGET_SPARC
6c36d3fa 2109 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2110#elif TARGET_CRIS
2111 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2112#endif
33417e70
FB
2113 return 0;
2114}
2115
a4193c8a 2116static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2117{
67d3b957 2118#ifdef DEBUG_UNASSIGNED
ab3d1727 2119 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2120#endif
b4f0a316 2121#ifdef TARGET_SPARC
6c36d3fa 2122 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2123#elif TARGET_CRIS
2124 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2125#endif
33417e70
FB
2126}
2127
2128static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2129 unassigned_mem_readb,
2130 unassigned_mem_readb,
2131 unassigned_mem_readb,
2132};
2133
2134static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2135 unassigned_mem_writeb,
2136 unassigned_mem_writeb,
2137 unassigned_mem_writeb,
2138};
2139
3a7d929e 2140static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2141{
3a7d929e
FB
2142 unsigned long ram_addr;
2143 int dirty_flags;
2144 ram_addr = addr - (unsigned long)phys_ram_base;
2145 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2146 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2147#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2148 tb_invalidate_phys_page_fast(ram_addr, 1);
2149 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2150#endif
3a7d929e 2151 }
c27004ec 2152 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2153#ifdef USE_KQEMU
2154 if (cpu_single_env->kqemu_enabled &&
2155 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2156 kqemu_modify_page(cpu_single_env, ram_addr);
2157#endif
f23db169
FB
2158 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2159 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2160 /* we remove the notdirty callback only if the code has been
2161 flushed */
2162 if (dirty_flags == 0xff)
6a00d601 2163 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2164}
2165
3a7d929e 2166static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2167{
3a7d929e
FB
2168 unsigned long ram_addr;
2169 int dirty_flags;
2170 ram_addr = addr - (unsigned long)phys_ram_base;
2171 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2172 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2173#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2174 tb_invalidate_phys_page_fast(ram_addr, 2);
2175 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2176#endif
3a7d929e 2177 }
c27004ec 2178 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2179#ifdef USE_KQEMU
2180 if (cpu_single_env->kqemu_enabled &&
2181 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2182 kqemu_modify_page(cpu_single_env, ram_addr);
2183#endif
f23db169
FB
2184 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2185 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2186 /* we remove the notdirty callback only if the code has been
2187 flushed */
2188 if (dirty_flags == 0xff)
6a00d601 2189 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2190}
2191
3a7d929e 2192static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2193{
3a7d929e
FB
2194 unsigned long ram_addr;
2195 int dirty_flags;
2196 ram_addr = addr - (unsigned long)phys_ram_base;
2197 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2198 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2199#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2200 tb_invalidate_phys_page_fast(ram_addr, 4);
2201 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2202#endif
3a7d929e 2203 }
c27004ec 2204 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2205#ifdef USE_KQEMU
2206 if (cpu_single_env->kqemu_enabled &&
2207 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2208 kqemu_modify_page(cpu_single_env, ram_addr);
2209#endif
f23db169
FB
2210 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2211 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2212 /* we remove the notdirty callback only if the code has been
2213 flushed */
2214 if (dirty_flags == 0xff)
6a00d601 2215 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2216}
2217
3a7d929e 2218static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2219 NULL, /* never used */
2220 NULL, /* never used */
2221 NULL, /* never used */
2222};
2223
1ccde1cb
FB
2224static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2225 notdirty_mem_writeb,
2226 notdirty_mem_writew,
2227 notdirty_mem_writel,
2228};
2229
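/* The notdirty_mem_write*() handlers above implement dirty tracking for RAM
 * pages that still have translated code: on the first write the cached
 * translations are invalidated, the per-page dirty byte is updated, and once
 * every dirty bit is set the slow write callback can be dropped from the TLB.
 * A minimal stand-alone sketch of that bookkeeping (names are illustrative,
 * and the "invalidate"/"drop callback" steps are just printouts): */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_BITS 12
#define DEMO_CODE_DIRTY_FLAG 0x02      /* clear => translated code may still exist */

static uint8_t demo_dirty[16];         /* one dirty byte per RAM page */

static void demo_notdirty_write(uint32_t ram_addr)
{
    unsigned page = ram_addr >> DEMO_PAGE_BITS;
    int flags = demo_dirty[page];

    if (!(flags & DEMO_CODE_DIRTY_FLAG)) {
        printf("page %u: invalidating translated code\n", page);
        /* a real implementation re-reads the flags here, since the
           invalidation itself may set CODE_DIRTY_FLAG */
        flags = demo_dirty[page] | DEMO_CODE_DIRTY_FLAG;
    }
    /* the data write itself would happen here */
    flags |= 0xff & ~DEMO_CODE_DIRTY_FLAG;   /* mark everything except code-dirty */
    demo_dirty[page] = (uint8_t)flags;
    if (demo_dirty[page] == 0xff)
        printf("page %u: fully dirty, slow write path no longer needed\n", page);
}

int main(void)
{
    demo_notdirty_write(0x1234);       /* first write to page 1: invalidates code */
    demo_notdirty_write(0x1238);       /* second write: code already invalidated */
    return 0;
}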
6658ffb8
PB
2230#if defined(CONFIG_SOFTMMU)
2231/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2232 so these check for a hit then pass through to the normal out-of-line
2233 phys routines. */
2234static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2235{
2236 return ldub_phys(addr);
2237}
2238
2239static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2240{
2241 return lduw_phys(addr);
2242}
2243
2244static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2245{
2246 return ldl_phys(addr);
2247}
2248
2249/* Generate a debug exception if a watchpoint has been hit.
2250 Returns the real physical address of the access. addr will be a host
d79acba4 2251 address in case of a RAM location. */
6658ffb8
PB
2252static target_ulong check_watchpoint(target_phys_addr_t addr)
2253{
2254 CPUState *env = cpu_single_env;
2255 target_ulong watch;
2256 target_ulong retaddr;
2257 int i;
2258
2259 retaddr = addr;
2260 for (i = 0; i < env->nb_watchpoints; i++) {
2261 watch = env->watchpoint[i].vaddr;
2262 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2263 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2264 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2265 cpu_single_env->watchpoint_hit = i + 1;
2266 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2267 break;
2268 }
2269 }
2270 }
2271 return retaddr;
2272}
2273
2274static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2275 uint32_t val)
2276{
2277 addr = check_watchpoint(addr);
2278 stb_phys(addr, val);
2279}
2280
2281static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2282 uint32_t val)
2283{
2284 addr = check_watchpoint(addr);
2285 stw_phys(addr, val);
2286}
2287
2288static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2289 uint32_t val)
2290{
2291 addr = check_watchpoint(addr);
2292 stl_phys(addr, val);
2293}
2294
2295static CPUReadMemoryFunc *watch_mem_read[3] = {
2296 watch_mem_readb,
2297 watch_mem_readw,
2298 watch_mem_readl,
2299};
2300
2301static CPUWriteMemoryFunc *watch_mem_write[3] = {
2302 watch_mem_writeb,
2303 watch_mem_writew,
2304 watch_mem_writel,
2305};
2306#endif
2307
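/* check_watchpoint() above is reached because the TLB routes every access to
 * a watched page through the watchpoint io slot; the TLB only matches at page
 * granularity, so the exact address must be compared here before a debug
 * exception is raised.  Illustrative sketch of that two-step match against a
 * flat watchpoint table (names and the single-byte watchpoints are
 * assumptions): */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_MASK (~(uint32_t)0xfff)

struct demo_watchpoint { uint32_t vaddr; };

static int demo_check_watchpoint(const struct demo_watchpoint *wp, int nb,
                                 uint32_t access_vaddr)
{
    int i;
    for (i = 0; i < nb; i++) {
        if (((access_vaddr ^ wp[i].vaddr) & DEMO_PAGE_MASK) != 0)
            continue;                       /* not even the same page */
        if (((access_vaddr ^ wp[i].vaddr) & ~DEMO_PAGE_MASK) == 0)
            return i;                       /* exact hit: raise debug exception */
    }
    return -1;                              /* watched page, but a different byte */
}

int main(void)
{
    struct demo_watchpoint wp[1] = { { 0x4010 } };
    printf("%d\n", demo_check_watchpoint(wp, 1, 0x4010)); /* 0: hit        */
    printf("%d\n", demo_check_watchpoint(wp, 1, 0x4014)); /* -1: near miss */
    return 0;
}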
db7b5426
BS
2308static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2309 unsigned int len)
2310{
db7b5426
BS
2311 uint32_t ret;
2312 unsigned int idx;
2313
2314 idx = SUBPAGE_IDX(addr - mmio->base);
2315#if defined(DEBUG_SUBPAGE)
2316 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2317 mmio, len, addr, idx);
2318#endif
3ee89922 2319 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2320
2321 return ret;
2322}
2323
2324static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2325 uint32_t value, unsigned int len)
2326{
db7b5426
BS
2327 unsigned int idx;
2328
2329 idx = SUBPAGE_IDX(addr - mmio->base);
2330#if defined(DEBUG_SUBPAGE)
2331 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2332 mmio, len, addr, idx, value);
2333#endif
3ee89922 2334 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2335}
2336
2337static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2338{
2339#if defined(DEBUG_SUBPAGE)
2340 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2341#endif
2342
2343 return subpage_readlen(opaque, addr, 0);
2344}
2345
2346static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2347 uint32_t value)
2348{
2349#if defined(DEBUG_SUBPAGE)
2350 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2351#endif
2352 subpage_writelen(opaque, addr, value, 0);
2353}
2354
2355static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2356{
2357#if defined(DEBUG_SUBPAGE)
2358 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2359#endif
2360
2361 return subpage_readlen(opaque, addr, 1);
2362}
2363
2364static void subpage_writew (void *opaque, target_phys_addr_t addr,
2365 uint32_t value)
2366{
2367#if defined(DEBUG_SUBPAGE)
2368 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2369#endif
2370 subpage_writelen(opaque, addr, value, 1);
2371}
2372
2373static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2374{
2375#if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377#endif
2378
2379 return subpage_readlen(opaque, addr, 2);
2380}
2381
2382static void subpage_writel (void *opaque,
2383 target_phys_addr_t addr, uint32_t value)
2384{
2385#if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387#endif
2388 subpage_writelen(opaque, addr, value, 2);
2389}
2390
2391static CPUReadMemoryFunc *subpage_read[] = {
2392 &subpage_readb,
2393 &subpage_readw,
2394 &subpage_readl,
2395};
2396
2397static CPUWriteMemoryFunc *subpage_write[] = {
2398 &subpage_writeb,
2399 &subpage_writew,
2400 &subpage_writel,
2401};
2402
2403static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2404 int memory)
2405{
2406 int idx, eidx;
4254fab8 2407 unsigned int i;
db7b5426
BS
2408
2409 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2410 return -1;
2411 idx = SUBPAGE_IDX(start);
2412 eidx = SUBPAGE_IDX(end);
2413#if defined(DEBUG_SUBPAGE)
2414 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2415 mmio, start, end, idx, eidx, memory);
2416#endif
2417 memory >>= IO_MEM_SHIFT;
2418 for (; idx <= eidx; idx++) {
4254fab8 2419 for (i = 0; i < 4; i++) {
3ee89922
BS
2420 if (io_mem_read[memory][i]) {
2421 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2422 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2423 }
2424 if (io_mem_write[memory][i]) {
2425 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2426 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2427 }
4254fab8 2428 }
db7b5426
BS
2429 }
2430
2431 return 0;
2432}
2433
2434static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2435 int orig_memory)
2436{
2437 subpage_t *mmio;
2438 int subpage_memory;
2439
2440 mmio = qemu_mallocz(sizeof(subpage_t));
2441 if (mmio != NULL) {
2442 mmio->base = base;
2443 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2444#if defined(DEBUG_SUBPAGE)
2445 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2446 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2447#endif
2448 *phys = subpage_memory | IO_MEM_SUBPAGE;
2449 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2450 }
2451
2452 return mmio;
2453}
2454
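/* subpage_register()/subpage_readlen() above keep, for every sub-page index
 * and access size, a pointer to the handler of the io region that claimed
 * that byte.  A reduced sketch: one handler pointer per index, registered
 * over [start, end] and used to dispatch a read.  Names and the 16-byte
 * "page" are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SUBPAGE_SIZE 16

typedef uint32_t (*demo_read_fn)(uint32_t offset);

static demo_read_fn demo_handlers[DEMO_SUBPAGE_SIZE];

static uint32_t demo_read_a(uint32_t off) { return 0xa0 + off; }
static uint32_t demo_read_b(uint32_t off) { return 0xb0 + off; }

static int demo_subpage_register(unsigned start, unsigned end, demo_read_fn fn)
{
    unsigned idx;
    if (start >= DEMO_SUBPAGE_SIZE || end >= DEMO_SUBPAGE_SIZE)
        return -1;
    for (idx = start; idx <= end; idx++)
        demo_handlers[idx] = fn;            /* later registrations override earlier ones */
    return 0;
}

static uint32_t demo_subpage_read(uint32_t offset)
{
    return demo_handlers[offset](offset);   /* dispatch on the sub-page index */
}

int main(void)
{
    demo_subpage_register(0, 15, demo_read_a);   /* default mapping for the whole page */
    demo_subpage_register(4, 7,  demo_read_b);   /* a second region overlays bytes 4..7 */
    printf("0x%x 0x%x\n", demo_subpage_read(2), demo_subpage_read(5)); /* 0xa2 0xb5 */
    return 0;
}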
33417e70
FB
2455static void io_mem_init(void)
2456{
3a7d929e 2457 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2458 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2459 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2460 io_mem_nb = 5;
2461
6658ffb8
PB
2462#if defined(CONFIG_SOFTMMU)
2463 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2464 watch_mem_write, NULL);
2465#endif
1ccde1cb 2466 /* alloc dirty bits array */
0a962c02 2467 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2468 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2469}
2470
2471/* mem_read and mem_write are arrays of functions containing the
2472 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2473 2). Functions can be omitted with a NULL function pointer. The
2474 registered functions may be modified dynamically later.
2475    If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2476 modified. If it is zero, a new io zone is allocated. The return
2477 value can be used with cpu_register_physical_memory(). (-1) is
2478    returned on error. */
33417e70
FB
2479int cpu_register_io_memory(int io_index,
2480 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2481 CPUWriteMemoryFunc **mem_write,
2482 void *opaque)
33417e70 2483{
4254fab8 2484 int i, subwidth = 0;
33417e70
FB
2485
2486 if (io_index <= 0) {
b5ff1b31 2487 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2488 return -1;
2489 io_index = io_mem_nb++;
2490 } else {
2491 if (io_index >= IO_MEM_NB_ENTRIES)
2492 return -1;
2493 }
b5ff1b31 2494
33417e70 2495 for(i = 0;i < 3; i++) {
4254fab8
BS
2496 if (!mem_read[i] || !mem_write[i])
2497 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2498 io_mem_read[io_index][i] = mem_read[i];
2499 io_mem_write[io_index][i] = mem_write[i];
2500 }
a4193c8a 2501 io_mem_opaque[io_index] = opaque;
4254fab8 2502 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2503}
61382a50 2504
8926b517
FB
2505CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2506{
2507 return io_mem_write[io_index >> IO_MEM_SHIFT];
2508}
2509
2510CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2511{
2512 return io_mem_read[io_index >> IO_MEM_SHIFT];
2513}
2514
13eb76e0
FB
2515/* physical memory access (slow version, mainly for debug) */
2516#if defined(CONFIG_USER_ONLY)
5fafdf24 2517void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2518 int len, int is_write)
2519{
2520 int l, flags;
2521 target_ulong page;
53a5960a 2522 void * p;
13eb76e0
FB
2523
2524 while (len > 0) {
2525 page = addr & TARGET_PAGE_MASK;
2526 l = (page + TARGET_PAGE_SIZE) - addr;
2527 if (l > len)
2528 l = len;
2529 flags = page_get_flags(page);
2530 if (!(flags & PAGE_VALID))
2531 return;
2532 if (is_write) {
2533 if (!(flags & PAGE_WRITE))
2534 return;
579a97f7
FB
2535 /* XXX: this code should not depend on lock_user */
2536 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2537 /* FIXME - should this return an error rather than just fail? */
2538 return;
53a5960a
PB
2539 memcpy(p, buf, len);
2540 unlock_user(p, addr, len);
13eb76e0
FB
2541 } else {
2542 if (!(flags & PAGE_READ))
2543 return;
579a97f7
FB
2544 /* XXX: this code should not depend on lock_user */
2545 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2546 /* FIXME - should this return an error rather than just fail? */
2547 return;
53a5960a
PB
2548 memcpy(buf, p, len);
2549 unlock_user(p, addr, 0);
13eb76e0
FB
2550 }
2551 len -= l;
2552 buf += l;
2553 addr += l;
2554 }
2555}
8df1cd07 2556
13eb76e0 2557#else
5fafdf24 2558void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2559 int len, int is_write)
2560{
2561 int l, io_index;
2562 uint8_t *ptr;
2563 uint32_t val;
2e12669a
FB
2564 target_phys_addr_t page;
2565 unsigned long pd;
92e873b9 2566 PhysPageDesc *p;
3b46e624 2567
13eb76e0
FB
2568 while (len > 0) {
2569 page = addr & TARGET_PAGE_MASK;
2570 l = (page + TARGET_PAGE_SIZE) - addr;
2571 if (l > len)
2572 l = len;
92e873b9 2573 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2574 if (!p) {
2575 pd = IO_MEM_UNASSIGNED;
2576 } else {
2577 pd = p->phys_offset;
2578 }
3b46e624 2579
13eb76e0 2580 if (is_write) {
3a7d929e 2581 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2582 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2583 /* XXX: could force cpu_single_env to NULL to avoid
2584 potential bugs */
13eb76e0 2585 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2586 /* 32 bit write access */
c27004ec 2587 val = ldl_p(buf);
a4193c8a 2588 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2589 l = 4;
2590 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2591 /* 16 bit write access */
c27004ec 2592 val = lduw_p(buf);
a4193c8a 2593 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2594 l = 2;
2595 } else {
1c213d19 2596 /* 8 bit write access */
c27004ec 2597 val = ldub_p(buf);
a4193c8a 2598 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2599 l = 1;
2600 }
2601 } else {
b448f2f3
FB
2602 unsigned long addr1;
2603 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2604 /* RAM case */
b448f2f3 2605 ptr = phys_ram_base + addr1;
13eb76e0 2606 memcpy(ptr, buf, l);
3a7d929e
FB
2607 if (!cpu_physical_memory_is_dirty(addr1)) {
2608 /* invalidate code */
2609 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2610 /* set dirty bit */
5fafdf24 2611 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2612 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2613 }
13eb76e0
FB
2614 }
2615 } else {
5fafdf24 2616 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2617 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2618 /* I/O case */
2619 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2620 if (l >= 4 && ((addr & 3) == 0)) {
2621 /* 32 bit read access */
a4193c8a 2622 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2623 stl_p(buf, val);
13eb76e0
FB
2624 l = 4;
2625 } else if (l >= 2 && ((addr & 1) == 0)) {
2626 /* 16 bit read access */
a4193c8a 2627 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2628 stw_p(buf, val);
13eb76e0
FB
2629 l = 2;
2630 } else {
1c213d19 2631 /* 8 bit read access */
a4193c8a 2632 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2633 stb_p(buf, val);
13eb76e0
FB
2634 l = 1;
2635 }
2636 } else {
2637 /* RAM case */
5fafdf24 2638 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2639 (addr & ~TARGET_PAGE_MASK);
2640 memcpy(buf, ptr, l);
2641 }
2642 }
2643 len -= l;
2644 buf += l;
2645 addr += l;
2646 }
2647}
8df1cd07 2648
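/* The write loop of cpu_physical_memory_rw() above splits a transfer at page
 * boundaries and, for io regions, picks the widest access that the current
 * alignment and remaining length allow (4, 2 or 1 bytes).  A sketch of just
 * that chunking and width-selection logic, printing the accesses it would
 * issue (all names illustrative): */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

static void demo_phys_write(uint32_t addr, uint32_t len)
{
    while (len > 0) {
        uint32_t page = addr & ~(DEMO_PAGE_SIZE - 1);
        uint32_t l = (page + DEMO_PAGE_SIZE) - addr;   /* bytes left in this page */
        if (l > len)
            l = len;
        if (l >= 4 && (addr & 3) == 0)
            l = 4;                                     /* 32-bit io access */
        else if (l >= 2 && (addr & 1) == 0)
            l = 2;                                     /* 16-bit io access */
        else
            l = 1;                                     /* 8-bit io access */
        printf("io write: addr=0x%05x len=%u\n", addr, l);
        len  -= l;
        addr += l;
    }
}

int main(void)
{
    /* 7 bytes starting at an odd address near a page boundary:
       issues a 1-byte, then a 2-byte, then a 4-byte access */
    demo_phys_write(0x00ffd, 7);
    return 0;
}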
d0ecd2aa 2649/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2650void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2651 const uint8_t *buf, int len)
2652{
2653 int l;
2654 uint8_t *ptr;
2655 target_phys_addr_t page;
2656 unsigned long pd;
2657 PhysPageDesc *p;
3b46e624 2658
d0ecd2aa
FB
2659 while (len > 0) {
2660 page = addr & TARGET_PAGE_MASK;
2661 l = (page + TARGET_PAGE_SIZE) - addr;
2662 if (l > len)
2663 l = len;
2664 p = phys_page_find(page >> TARGET_PAGE_BITS);
2665 if (!p) {
2666 pd = IO_MEM_UNASSIGNED;
2667 } else {
2668 pd = p->phys_offset;
2669 }
3b46e624 2670
d0ecd2aa 2671 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2672 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2673 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2674 /* do nothing */
2675 } else {
2676 unsigned long addr1;
2677 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2678 /* ROM/RAM case */
2679 ptr = phys_ram_base + addr1;
2680 memcpy(ptr, buf, l);
2681 }
2682 len -= l;
2683 buf += l;
2684 addr += l;
2685 }
2686}
2687
2688
8df1cd07
FB
2689/* warning: addr must be aligned */
2690uint32_t ldl_phys(target_phys_addr_t addr)
2691{
2692 int io_index;
2693 uint8_t *ptr;
2694 uint32_t val;
2695 unsigned long pd;
2696 PhysPageDesc *p;
2697
2698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2699 if (!p) {
2700 pd = IO_MEM_UNASSIGNED;
2701 } else {
2702 pd = p->phys_offset;
2703 }
3b46e624 2704
5fafdf24 2705 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2706 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2707 /* I/O case */
2708 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2709 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2710 } else {
2711 /* RAM case */
5fafdf24 2712 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2713 (addr & ~TARGET_PAGE_MASK);
2714 val = ldl_p(ptr);
2715 }
2716 return val;
2717}
2718
84b7b8e7
FB
2719/* warning: addr must be aligned */
2720uint64_t ldq_phys(target_phys_addr_t addr)
2721{
2722 int io_index;
2723 uint8_t *ptr;
2724 uint64_t val;
2725 unsigned long pd;
2726 PhysPageDesc *p;
2727
2728 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2729 if (!p) {
2730 pd = IO_MEM_UNASSIGNED;
2731 } else {
2732 pd = p->phys_offset;
2733 }
3b46e624 2734
2a4188a3
FB
2735 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2736 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2737 /* I/O case */
2738 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2739#ifdef TARGET_WORDS_BIGENDIAN
2740 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2741 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2742#else
2743 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2744 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2745#endif
2746 } else {
2747 /* RAM case */
5fafdf24 2748 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2749 (addr & ~TARGET_PAGE_MASK);
2750 val = ldq_p(ptr);
2751 }
2752 return val;
2753}
2754
aab33094
FB
2755/* XXX: optimize */
2756uint32_t ldub_phys(target_phys_addr_t addr)
2757{
2758 uint8_t val;
2759 cpu_physical_memory_read(addr, &val, 1);
2760 return val;
2761}
2762
2763/* XXX: optimize */
2764uint32_t lduw_phys(target_phys_addr_t addr)
2765{
2766 uint16_t val;
2767 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2768 return tswap16(val);
2769}
2770
8df1cd07
FB
2771/* warning: addr must be aligned. The ram page is not marked as dirty
2772 and the code inside is not invalidated. It is useful if the dirty
2773 bits are used to track modified PTEs */
2774void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2775{
2776 int io_index;
2777 uint8_t *ptr;
2778 unsigned long pd;
2779 PhysPageDesc *p;
2780
2781 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2782 if (!p) {
2783 pd = IO_MEM_UNASSIGNED;
2784 } else {
2785 pd = p->phys_offset;
2786 }
3b46e624 2787
3a7d929e 2788 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2789 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2790 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2791 } else {
5fafdf24 2792 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2793 (addr & ~TARGET_PAGE_MASK);
2794 stl_p(ptr, val);
2795 }
2796}
2797
bc98a7ef
JM
2798void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2799{
2800 int io_index;
2801 uint8_t *ptr;
2802 unsigned long pd;
2803 PhysPageDesc *p;
2804
2805 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2806 if (!p) {
2807 pd = IO_MEM_UNASSIGNED;
2808 } else {
2809 pd = p->phys_offset;
2810 }
3b46e624 2811
bc98a7ef
JM
2812 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2813 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2814#ifdef TARGET_WORDS_BIGENDIAN
2815 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2816 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2817#else
2818 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2820#endif
2821 } else {
5fafdf24 2822 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2823 (addr & ~TARGET_PAGE_MASK);
2824 stq_p(ptr, val);
2825 }
2826}
2827
8df1cd07 2828/* warning: addr must be aligned */
8df1cd07
FB
2829void stl_phys(target_phys_addr_t addr, uint32_t val)
2830{
2831 int io_index;
2832 uint8_t *ptr;
2833 unsigned long pd;
2834 PhysPageDesc *p;
2835
2836 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2837 if (!p) {
2838 pd = IO_MEM_UNASSIGNED;
2839 } else {
2840 pd = p->phys_offset;
2841 }
3b46e624 2842
3a7d929e 2843 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2844 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2845 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2846 } else {
2847 unsigned long addr1;
2848 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2849 /* RAM case */
2850 ptr = phys_ram_base + addr1;
2851 stl_p(ptr, val);
3a7d929e
FB
2852 if (!cpu_physical_memory_is_dirty(addr1)) {
2853 /* invalidate code */
2854 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2855 /* set dirty bit */
f23db169
FB
2856 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2857 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2858 }
8df1cd07
FB
2859 }
2860}
2861
aab33094
FB
2862/* XXX: optimize */
2863void stb_phys(target_phys_addr_t addr, uint32_t val)
2864{
2865 uint8_t v = val;
2866 cpu_physical_memory_write(addr, &v, 1);
2867}
2868
2869/* XXX: optimize */
2870void stw_phys(target_phys_addr_t addr, uint32_t val)
2871{
2872 uint16_t v = tswap16(val);
2873 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2874}
2875
2876/* XXX: optimize */
2877void stq_phys(target_phys_addr_t addr, uint64_t val)
2878{
2879 val = tswap64(val);
2880 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2881}
2882
13eb76e0
FB
2883#endif
2884
2885/* virtual memory access for debug */
5fafdf24 2886int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2887 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2888{
2889 int l;
9b3c35e0
JM
2890 target_phys_addr_t phys_addr;
2891 target_ulong page;
13eb76e0
FB
2892
2893 while (len > 0) {
2894 page = addr & TARGET_PAGE_MASK;
2895 phys_addr = cpu_get_phys_page_debug(env, page);
2896 /* if no physical page mapped, return an error */
2897 if (phys_addr == -1)
2898 return -1;
2899 l = (page + TARGET_PAGE_SIZE) - addr;
2900 if (l > len)
2901 l = len;
5fafdf24 2902 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2903 buf, l, is_write);
13eb76e0
FB
2904 len -= l;
2905 buf += l;
2906 addr += l;
2907 }
2908 return 0;
2909}
2910
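/* cpu_memory_rw_debug() above walks a virtual range one page at a time,
 * translating each page to a physical address before handing the chunk to the
 * physical accessor; an unmapped page aborts the transfer.  A sketch with a
 * toy translation table (all names and the mapping are illustrative): */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(uint32_t)(DEMO_PAGE_SIZE - 1))

static uint8_t demo_phys_ram[3 * DEMO_PAGE_SIZE];

/* toy MMU: virtual page n maps to physical page (n + 1), page 2 is unmapped */
static int32_t demo_translate(uint32_t vaddr)
{
    uint32_t vpage = vaddr >> 12;
    if (vpage >= 2)
        return -1;
    return (int32_t)((vpage + 1) << 12);
}

static int demo_memory_read_debug(uint32_t addr, uint8_t *buf, uint32_t len)
{
    while (len > 0) {
        uint32_t page = addr & DEMO_PAGE_MASK;
        int32_t phys = demo_translate(page);
        uint32_t l = (page + DEMO_PAGE_SIZE) - addr;
        if (phys < 0)
            return -1;                   /* no physical page mapped */
        if (l > len)
            l = len;
        memcpy(buf, demo_phys_ram + phys + (addr & ~DEMO_PAGE_MASK), l);
        len  -= l;
        buf  += l;
        addr += l;
    }
    return 0;
}

int main(void)
{
    uint8_t buf[8];
    memcpy(demo_phys_ram + 0x1ffc, "ABCDEFGH", 8);   /* straddles phys pages 1 and 2 */
    printf("%d\n", demo_memory_read_debug(0x0ffc, buf, 8));  /* 0: spans vpages 0 and 1 */
    printf("%.8s\n", (char *)buf);                           /* ABCDEFGH */
    printf("%d\n", demo_memory_read_debug(0x2000, buf, 4));  /* -1: vpage 2 unmapped */
    return 0;
}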
e3db7226
FB
2911void dump_exec_info(FILE *f,
2912 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2913{
2914 int i, target_code_size, max_target_code_size;
2915 int direct_jmp_count, direct_jmp2_count, cross_page;
2916 TranslationBlock *tb;
3b46e624 2917
e3db7226
FB
2918 target_code_size = 0;
2919 max_target_code_size = 0;
2920 cross_page = 0;
2921 direct_jmp_count = 0;
2922 direct_jmp2_count = 0;
2923 for(i = 0; i < nb_tbs; i++) {
2924 tb = &tbs[i];
2925 target_code_size += tb->size;
2926 if (tb->size > max_target_code_size)
2927 max_target_code_size = tb->size;
2928 if (tb->page_addr[1] != -1)
2929 cross_page++;
2930 if (tb->tb_next_offset[0] != 0xffff) {
2931 direct_jmp_count++;
2932 if (tb->tb_next_offset[1] != 0xffff) {
2933 direct_jmp2_count++;
2934 }
2935 }
2936 }
2937 /* XXX: avoid using doubles ? */
2938 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2939 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2940 nb_tbs ? target_code_size / nb_tbs : 0,
2941 max_target_code_size);
5fafdf24 2942 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2943 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2944 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2945 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2946 cross_page,
e3db7226
FB
2947 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2948 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2949 direct_jmp_count,
e3db7226
FB
2950 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2951 direct_jmp2_count,
2952 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2953 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2954 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2955 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2956}
2957
5fafdf24 2958#if !defined(CONFIG_USER_ONLY)
61382a50
FB
2959
2960#define MMUSUFFIX _cmmu
2961#define GETPC() NULL
2962#define env cpu_single_env
b769d8fe 2963#define SOFTMMU_CODE_ACCESS
61382a50
FB
2964
2965#define SHIFT 0
2966#include "softmmu_template.h"
2967
2968#define SHIFT 1
2969#include "softmmu_template.h"
2970
2971#define SHIFT 2
2972#include "softmmu_template.h"
2973
2974#define SHIFT 3
2975#include "softmmu_template.h"
2976
2977#undef env
2978
2979#endif