/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

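/* A page index is looked up through a two-level table: the top L1_BITS
   of the index select an l1_map/l1_phys_map slot, and the low L2_BITS
   select the entry inside a second-level array allocated on demand.
   For example, with 4 KB target pages (TARGET_PAGE_BITS = 12) and the
   32-bit layout above, L1_BITS = L2_BITS = 10, so page_find_alloc()
   below splits a page index as (index >> 10) for level 1 and
   (index & 1023) for level 2. */
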
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

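/* Subpage support: when a single target page is split among several
   memory regions, a subpage_t records, indexed by the offset within
   the page (SUBPAGE_IDX strips the page-aligned part of the address)
   and by the access size, the read/write callbacks and opaque pointers
   to dispatch to. */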
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

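/* Pointers stored in a PageDesc's TB list and in the jump lists carry
   extra state in their low two bits: they hold the index of the page
   (for page_next) or of the jump slot (for jmp_next), and the value 2
   marks the head of the circular jmp_first list. The helpers below
   therefore mask with ~3 to recover the real TranslationBlock pointer
   and with 3 to recover the index. */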
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

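/* Invalidating a TB means removing it from every structure that can
   reach it: the physical PC hash table, the per-page TB lists, each
   CPU's tb_jmp_cache, and the two jump lists, finally redirecting any
   TB that chained to it back to its own epilogue via tb_reset_jump(). */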
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

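/* The code bitmap has one bit per byte of the target page; set_bits()
   marks the byte range covered by a TB. For instance, set_bits(tab, 3, 7)
   sets bits 3..9: bits 3-7 of tab[0] and bits 0-1 of tab[1]. When a
   write hits the page, tb_invalidate_phys_page_fast() only takes the
   slow invalidation path if the written bytes overlap translated code. */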
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

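/* Because 'start' must be a multiple of 'len' and len <= 8, the len
   bits tested below never straddle a byte boundary, so one shifted
   byte of the code bitmap is enough to decide whether the write
   touches translated code. */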
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops before eflags optimization" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

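/* Dirty tracking works by retargeting the TLB entry: when a RAM page's
   dirty flags are cleared, its addr_write comparators are switched from
   IO_MEM_RAM to IO_MEM_NOTDIRTY, so the next write takes the slow I/O
   path, which can set the dirty bits (and catch writes to translated
   code) before restoring the fast path via tlb_set_dirty(). */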
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

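/* A TLB entry keeps three comparators (addr_read, addr_write, addr_code),
   one per access kind; 'addend' converts a target virtual address into a
   host pointer. 'pd' below is the PhysPageDesc phys_offset: a page-aligned
   RAM offset whose low bits, when non-zero, select an I/O handler index
   instead of plain RAM. */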
59817ccb
FB
1626/* add a new TLB entry. At most one entry for a given virtual address
1627 is permitted. Return 0 if OK or 2 if the page could not be mapped
1628 (can only happen in non SOFTMMU mode for I/O pages or pages
1629 conflicting with the host address space). */
5fafdf24
TS
1630int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1631 target_phys_addr_t paddr, int prot,
6ebbf390 1632 int mmu_idx, int is_softmmu)
9fa3e853 1633{
92e873b9 1634 PhysPageDesc *p;
4f2ac237 1635 unsigned long pd;
9fa3e853 1636 unsigned int index;
4f2ac237 1637 target_ulong address;
108c49b8 1638 target_phys_addr_t addend;
9fa3e853 1639 int ret;
84b7b8e7 1640 CPUTLBEntry *te;
6658ffb8 1641 int i;
9fa3e853 1642
92e873b9 1643 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1644 if (!p) {
1645 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1646 } else {
1647 pd = p->phys_offset;
9fa3e853
FB
1648 }
1649#if defined(DEBUG_TLB)
6ebbf390
JM
1650 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1651 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1652#endif
1653
1654 ret = 0;
1655#if !defined(CONFIG_SOFTMMU)
5fafdf24 1656 if (is_softmmu)
9fa3e853
FB
1657#endif
1658 {
2a4188a3 1659 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1660 /* IO memory case */
1661 address = vaddr | pd;
1662 addend = paddr;
1663 } else {
1664 /* standard memory */
1665 address = vaddr;
1666 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1667 }
6658ffb8
PB
1668
1669 /* Make accesses to pages with watchpoints go via the
1670 watchpoint trap routines. */
1671 for (i = 0; i < env->nb_watchpoints; i++) {
1672 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1673 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1674 env->watchpoint[i].addend = 0;
6658ffb8
PB
1675 address = vaddr | io_mem_watch;
1676 } else {
d79acba4
AZ
1677 env->watchpoint[i].addend = pd - paddr +
1678 (unsigned long) phys_ram_base;
6658ffb8
PB
1679 /* TODO: Figure out how to make read watchpoints coexist
1680 with code. */
1681 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1682 }
1683 }
1684 }
d79acba4 1685
90f18422 1686 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1687 addend -= vaddr;
6ebbf390 1688 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1689 te->addend = addend;
67b915a5 1690 if (prot & PAGE_READ) {
84b7b8e7
FB
1691 te->addr_read = address;
1692 } else {
1693 te->addr_read = -1;
1694 }
1695 if (prot & PAGE_EXEC) {
1696 te->addr_code = address;
9fa3e853 1697 } else {
84b7b8e7 1698 te->addr_code = -1;
9fa3e853 1699 }
67b915a5 1700 if (prot & PAGE_WRITE) {
5fafdf24 1701 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1702 (pd & IO_MEM_ROMD)) {
1703 /* write access calls the I/O callback */
5fafdf24 1704 te->addr_write = vaddr |
856074ec 1705 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1706 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1707 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1708 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1709 } else {
84b7b8e7 1710 te->addr_write = address;
9fa3e853
FB
1711 }
1712 } else {
84b7b8e7 1713 te->addr_write = -1;
9fa3e853
FB
1714 }
1715 }
1716#if !defined(CONFIG_SOFTMMU)
1717 else {
1718 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1719 /* IO access: no mapping is done as it will be handled by the
1720 soft MMU */
1721 if (!(env->hflags & HF_SOFTMMU_MASK))
1722 ret = 2;
1723 } else {
1724 void *map_addr;
59817ccb
FB
1725
1726 if (vaddr >= MMAP_AREA_END) {
1727 ret = 2;
1728 } else {
1729 if (prot & PROT_WRITE) {
5fafdf24 1730 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1731#if defined(TARGET_HAS_SMC) || 1
59817ccb 1732 first_tb ||
d720b93d 1733#endif
5fafdf24 1734 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1735 !cpu_physical_memory_is_dirty(pd))) {
1736 /* ROM: we do as if code was inside */
1737 /* if code is present, we only map as read only and save the
1738 original mapping */
1739 VirtPageDesc *vp;
3b46e624 1740
90f18422 1741 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1742 vp->phys_addr = pd;
1743 vp->prot = prot;
1744 vp->valid_tag = virt_valid_tag;
1745 prot &= ~PAGE_WRITE;
1746 }
1747 }
5fafdf24 1748 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1749 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1750 if (map_addr == MAP_FAILED) {
1751 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1752 paddr, vaddr);
9fa3e853 1753 }
9fa3e853
FB
1754 }
1755 }
1756 }
1757#endif
1758 return ret;
1759}
1760
1761/* called from signal handler: invalidate the code and unprotect the
1762 page. Return TRUE if the fault was succesfully handled. */
53a5960a 1763int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1764{
1765#if !defined(CONFIG_SOFTMMU)
1766 VirtPageDesc *vp;
1767
1768#if defined(DEBUG_TLB)
1769 printf("page_unprotect: addr=0x%08x\n", addr);
1770#endif
1771 addr &= TARGET_PAGE_MASK;
1772
1773 /* if it is not mapped, no need to worry here */
1774 if (addr >= MMAP_AREA_END)
1775 return 0;
1776 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1777 if (!vp)
1778 return 0;
1779 /* NOTE: in this case, validate_tag is _not_ tested as it
1780 validates only the code TLB */
1781 if (vp->valid_tag != virt_valid_tag)
1782 return 0;
1783 if (!(vp->prot & PAGE_WRITE))
1784 return 0;
1785#if defined(DEBUG_TLB)
5fafdf24 1786 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1787 addr, vp->phys_addr, vp->prot);
1788#endif
1789 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1790 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1791 (unsigned long)addr, vp->prot);
d720b93d 1792 /* set the dirty bit */
0a962c02 1793 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1794 /* flush the code inside */
1795 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1796 return 1;
1797#else
1798 return 0;
1799#endif
1800}
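/* Illustration (not part of the original source): a minimal sketch of how a
   host SIGSEGV handler would drive the page_unprotect() above, assuming a
   POSIX host with SA_SIGINFO.  The real dispatch lives in the per-host
   cpu_signal_handler() code; host_segv_handler here is hypothetical. */
#if 0 /* example only */
#include <signal.h>
static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    unsigned long pc = 0; /* the real code extracts the host PC from puc */
    /* if the fault came from our own write protection of a page holding
       translated code, undo it and retry the faulting instruction */
    if (page_unprotect((target_ulong)(unsigned long)info->si_addr, pc, puc))
        return;
    abort(); /* otherwise it is a genuine fault */
}
#endif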
1801
1802#else
1803
ee8b7021 1804void tlb_flush(CPUState *env, int flush_global)
1805{
1806}
1807
2e12669a 1808void tlb_flush_page(CPUState *env, target_ulong addr)
1809{
1810}
1811
1812int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1813 target_phys_addr_t paddr, int prot,
6ebbf390 1814 int mmu_idx, int is_softmmu)
1815{
1816 return 0;
1817}
0124311e 1818
1819/* dump memory mappings */
1820void page_dump(FILE *f)
33417e70 1821{
1822 unsigned long start, end;
1823 int i, j, prot, prot1;
1824 PageDesc *p;
33417e70 1825
1826 fprintf(f, "%-8s %-8s %-8s %s\n",
1827 "start", "end", "size", "prot");
1828 start = -1;
1829 end = -1;
1830 prot = 0;
1831 for(i = 0; i <= L1_SIZE; i++) {
1832 if (i < L1_SIZE)
1833 p = l1_map[i];
1834 else
1835 p = NULL;
 1836         for(j = 0; j < L2_SIZE; j++) {
1837 if (!p)
1838 prot1 = 0;
1839 else
1840 prot1 = p[j].flags;
1841 if (prot1 != prot) {
1842 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1843 if (start != -1) {
1844 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1845 start, end, end - start,
1846 prot & PAGE_READ ? 'r' : '-',
1847 prot & PAGE_WRITE ? 'w' : '-',
1848 prot & PAGE_EXEC ? 'x' : '-');
1849 }
1850 if (prot1 != 0)
1851 start = end;
1852 else
1853 start = -1;
1854 prot = prot1;
1855 }
1856 if (!p)
1857 break;
1858 }
33417e70 1859 }
1860}
1861
53a5960a 1862int page_get_flags(target_ulong address)
33417e70 1863{
1864 PageDesc *p;
1865
1866 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1867 if (!p)
1868 return 0;
1869 return p->flags;
1870}
1871
1872/* modify the flags of a page and invalidate the code if
 1873   necessary. The flag PAGE_WRITE_ORG is positioned automatically
1874 depending on PAGE_WRITE */
53a5960a 1875void page_set_flags(target_ulong start, target_ulong end, int flags)
1876{
1877 PageDesc *p;
53a5960a 1878 target_ulong addr;
1879
1880 start = start & TARGET_PAGE_MASK;
1881 end = TARGET_PAGE_ALIGN(end);
1882 if (flags & PAGE_WRITE)
1883 flags |= PAGE_WRITE_ORG;
1884 spin_lock(&tb_lock);
1885 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1886 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1887 /* if the write protection is set, then we invalidate the code
1888 inside */
5fafdf24 1889 if (!(p->flags & PAGE_WRITE) &&
1890 (flags & PAGE_WRITE) &&
1891 p->first_tb) {
d720b93d 1892 tb_invalidate_phys_page(addr, 0, NULL);
1893 }
1894 p->flags = flags;
1895 }
1896 spin_unlock(&tb_lock);
1897}
1898
1899int page_check_range(target_ulong start, target_ulong len, int flags)
1900{
1901 PageDesc *p;
1902 target_ulong end;
1903 target_ulong addr;
1904
 1905     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1906 start = start & TARGET_PAGE_MASK;
1907
 1908     if (end < start)
 1909         /* we've wrapped around */
 1910         return -1;
 1911     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
 1912         p = page_find(addr >> TARGET_PAGE_BITS);
 1913         if (!p)
 1914             return -1;
 1915         if (!(p->flags & PAGE_VALID))
 1916             return -1;
1917
dae3270c 1918 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1919 return -1;
1920 if (flags & PAGE_WRITE) {
1921 if (!(p->flags & PAGE_WRITE_ORG))
1922 return -1;
1923 /* unprotect the page if it was put read-only because it
1924 contains translated code */
1925 if (!(p->flags & PAGE_WRITE)) {
1926 if (!page_unprotect(addr, 0, NULL))
1927 return -1;
1928 }
1929 return 0;
1930 }
1931 }
1932 return 0;
1933}
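/* Illustration (not part of the original source): typical user-mode use of
   page_check_range() -- a hedged sketch of validating a guest buffer before
   a syscall handler writes through it.  copy_to_guest is hypothetical; g2h()
   is the guest-to-host address translation used elsewhere in this file. */
#if 0 /* example only */
#include <string.h>
static int copy_to_guest(target_ulong gaddr, const void *buf, target_ulong len)
{
    /* refuse up front if any page in the range is unmapped or read-only */
    if (page_check_range(gaddr, len, PAGE_WRITE) < 0)
        return -1; /* would fault: let the caller report EFAULT */
    memcpy((void *)g2h(gaddr), buf, len);
    return 0;
}
#endif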
1934
1935/* called from signal handler: invalidate the code and unprotect the
 1936   page. Return TRUE if the fault was successfully handled. */
53a5960a 1937int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1938{
1939 unsigned int page_index, prot, pindex;
1940 PageDesc *p, *p1;
53a5960a 1941 target_ulong host_start, host_end, addr;
9fa3e853 1942
83fb7adf 1943 host_start = address & qemu_host_page_mask;
1944 page_index = host_start >> TARGET_PAGE_BITS;
1945 p1 = page_find(page_index);
1946 if (!p1)
1947 return 0;
83fb7adf 1948 host_end = host_start + qemu_host_page_size;
1949 p = p1;
1950 prot = 0;
1951 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1952 prot |= p->flags;
1953 p++;
1954 }
1955 /* if the page was really writable, then we change its
1956 protection back to writable */
1957 if (prot & PAGE_WRITE_ORG) {
1958 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1959 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1960 mprotect((void *)g2h(host_start), qemu_host_page_size,
1961 (prot & PAGE_BITS) | PAGE_WRITE);
1962 p1[pindex].flags |= PAGE_WRITE;
1963 /* and since the content will be modified, we must invalidate
1964 the corresponding translated code. */
d720b93d 1965 tb_invalidate_phys_page(address, pc, puc);
1966#ifdef DEBUG_TB_CHECK
1967 tb_invalidate_check(address);
1968#endif
1969 return 1;
1970 }
1971 }
1972 return 0;
1973}
1974
1975static inline void tlb_set_dirty(CPUState *env,
1976 unsigned long addr, target_ulong vaddr)
1977{
1978}
1979#endif /* defined(CONFIG_USER_ONLY) */
1980
1981static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1982 int memory);
1983static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1984 int orig_memory);
1985#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1986 need_subpage) \
1987 do { \
1988 if (addr > start_addr) \
1989 start_addr2 = 0; \
1990 else { \
1991 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1992 if (start_addr2 > 0) \
1993 need_subpage = 1; \
1994 } \
1995 \
49e9fba2 1996 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1997 end_addr2 = TARGET_PAGE_SIZE - 1; \
1998 else { \
1999 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2000 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2001 need_subpage = 1; \
2002 } \
2003 } while (0)
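/* Illustration (not part of the original source): the CHECK_SUBPAGE
   arithmetic worked through by hand, assuming a 4 KiB target page.
   Registering size 0x800 at start_addr 0x1100 and evaluating the page at
   addr 0x1000 gives start_addr2 = 0x100 and end_addr2 = 0x8ff, so
   need_subpage is set and only that slice of the page is remapped. */
#if 0 /* example only */
#include <stdio.h>
#define EX_PAGE_SIZE 4096u
int main(void)
{
    unsigned addr = 0x1000, start_addr = 0x1100, orig_size = 0x800;
    /* offset of the region's start inside this page, if it starts here */
    unsigned start_addr2 = (addr > start_addr) ? 0
                           : (start_addr & (EX_PAGE_SIZE - 1));
    /* offset of the region's end inside this page, if it ends here */
    unsigned end_addr2 = ((start_addr + orig_size) - addr >= EX_PAGE_SIZE)
                         ? EX_PAGE_SIZE - 1
                         : ((start_addr + orig_size - 1) & (EX_PAGE_SIZE - 1));
    printf("start_addr2=0x%x end_addr2=0x%x\n", start_addr2, end_addr2);
    return 0;
}
#endif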
2004
2005/* register physical memory. 'size' must be a multiple of the target
2006 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2007 io memory page */
5fafdf24 2008void cpu_register_physical_memory(target_phys_addr_t start_addr,
2009 unsigned long size,
2010 unsigned long phys_offset)
33417e70 2011{
108c49b8 2012 target_phys_addr_t addr, end_addr;
92e873b9 2013 PhysPageDesc *p;
9d42037b 2014 CPUState *env;
2015 unsigned long orig_size = size;
2016 void *subpage;
33417e70 2017
5fd386f6 2018 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2019 end_addr = start_addr + (target_phys_addr_t)size;
2020 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2021 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2022 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2023 unsigned long orig_memory = p->phys_offset;
2024 target_phys_addr_t start_addr2, end_addr2;
2025 int need_subpage = 0;
2026
2027 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2028 need_subpage);
4254fab8 2029 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2030 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2031 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2032 &p->phys_offset, orig_memory);
2033 } else {
2034 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2035 >> IO_MEM_SHIFT];
2036 }
2037 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2038 } else {
2039 p->phys_offset = phys_offset;
2040 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2041 (phys_offset & IO_MEM_ROMD))
2042 phys_offset += TARGET_PAGE_SIZE;
2043 }
2044 } else {
2045 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2046 p->phys_offset = phys_offset;
2047 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2048 (phys_offset & IO_MEM_ROMD))
2049 phys_offset += TARGET_PAGE_SIZE;
2050 else {
2051 target_phys_addr_t start_addr2, end_addr2;
2052 int need_subpage = 0;
2053
2054 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2055 end_addr2, need_subpage);
2056
4254fab8 2057 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2058 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2059 &p->phys_offset, IO_MEM_UNASSIGNED);
2060 subpage_register(subpage, start_addr2, end_addr2,
2061 phys_offset);
2062 }
2063 }
2064 }
33417e70 2065 }
3b46e624 2066
2067 /* since each CPU stores ram addresses in its TLB cache, we must
2068 reset the modified entries */
2069 /* XXX: slow ! */
2070 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2071 tlb_flush(env, 1);
2072 }
2073}
2074
2075/* XXX: temporary until new memory mapping API */
2076uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2077{
2078 PhysPageDesc *p;
2079
2080 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2081 if (!p)
2082 return IO_MEM_UNASSIGNED;
2083 return p->phys_offset;
2084}
2085
2086/* XXX: better than nothing */
2087ram_addr_t qemu_ram_alloc(unsigned int size)
2088{
2089 ram_addr_t addr;
2090 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
5fafdf24 2091 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2092 size, phys_ram_size);
2093 abort();
2094 }
2095 addr = phys_ram_alloc_offset;
2096 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2097 return addr;
2098}
2099
2100void qemu_ram_free(ram_addr_t addr)
2101{
2102}
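/* Illustration (not part of the original source): how a board model would
   combine qemu_ram_alloc() with cpu_register_physical_memory() -- a hedged
   sketch; the addresses and sizes are made up.  Note that qemu_ram_free()
   above is deliberately a no-op: this bump allocator never reclaims.
   IO_MEM_RAM is zero in this tree, so or-ing it in is purely documentary. */
#if 0 /* example only */
static void board_ram_init(void)
{
    ram_addr_t ram_off = qemu_ram_alloc(8 * 1024 * 1024); /* 8 MiB of RAM */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_off | IO_MEM_RAM);
}
#endif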
2103
a4193c8a 2104static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2105{
67d3b957 2106#ifdef DEBUG_UNASSIGNED
ab3d1727 2107 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2108#endif
2109#ifdef TARGET_SPARC
6c36d3fa 2110 do_unassigned_access(addr, 0, 0, 0);
 2111#elif defined(TARGET_CRIS)
2112 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2113#endif
2114 return 0;
2115}
2116
a4193c8a 2117static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2118{
67d3b957 2119#ifdef DEBUG_UNASSIGNED
ab3d1727 2120 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2121#endif
b4f0a316 2122#ifdef TARGET_SPARC
6c36d3fa 2123 do_unassigned_access(addr, 1, 0, 0);
 2124#elif defined(TARGET_CRIS)
2125 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2126#endif
2127}
2128
2129static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2130 unassigned_mem_readb,
2131 unassigned_mem_readb,
2132 unassigned_mem_readb,
2133};
2134
2135static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2136 unassigned_mem_writeb,
2137 unassigned_mem_writeb,
2138 unassigned_mem_writeb,
2139};
2140
3a7d929e 2141static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2142{
2143 unsigned long ram_addr;
2144 int dirty_flags;
2145 ram_addr = addr - (unsigned long)phys_ram_base;
2146 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2147 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2148#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2149 tb_invalidate_phys_page_fast(ram_addr, 1);
2150 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2151#endif
3a7d929e 2152 }
c27004ec 2153 stb_p((uint8_t *)(long)addr, val);
2154#ifdef USE_KQEMU
2155 if (cpu_single_env->kqemu_enabled &&
2156 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2157 kqemu_modify_page(cpu_single_env, ram_addr);
2158#endif
2159 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2160 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2161 /* we remove the notdirty callback only if the code has been
2162 flushed */
2163 if (dirty_flags == 0xff)
6a00d601 2164 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2165}
2166
3a7d929e 2167static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2168{
2169 unsigned long ram_addr;
2170 int dirty_flags;
2171 ram_addr = addr - (unsigned long)phys_ram_base;
2172 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2173 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2174#if !defined(CONFIG_USER_ONLY)
2175 tb_invalidate_phys_page_fast(ram_addr, 2);
2176 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2177#endif
3a7d929e 2178 }
c27004ec 2179 stw_p((uint8_t *)(long)addr, val);
2180#ifdef USE_KQEMU
2181 if (cpu_single_env->kqemu_enabled &&
2182 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2183 kqemu_modify_page(cpu_single_env, ram_addr);
2184#endif
2185 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2186 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2187 /* we remove the notdirty callback only if the code has been
2188 flushed */
2189 if (dirty_flags == 0xff)
6a00d601 2190 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2191}
2192
3a7d929e 2193static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2194{
2195 unsigned long ram_addr;
2196 int dirty_flags;
2197 ram_addr = addr - (unsigned long)phys_ram_base;
2198 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2199 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2200#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2201 tb_invalidate_phys_page_fast(ram_addr, 4);
2202 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2203#endif
3a7d929e 2204 }
c27004ec 2205 stl_p((uint8_t *)(long)addr, val);
2206#ifdef USE_KQEMU
2207 if (cpu_single_env->kqemu_enabled &&
2208 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2209 kqemu_modify_page(cpu_single_env, ram_addr);
2210#endif
2211 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2212 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2213 /* we remove the notdirty callback only if the code has been
2214 flushed */
2215 if (dirty_flags == 0xff)
6a00d601 2216 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2217}
2218
3a7d929e 2219static CPUReadMemoryFunc *error_mem_read[3] = {
2220 NULL, /* never used */
2221 NULL, /* never used */
2222 NULL, /* never used */
2223};
2224
2225static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2226 notdirty_mem_writeb,
2227 notdirty_mem_writew,
2228 notdirty_mem_writel,
2229};
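/* Illustration (not part of the original source): the consumer side of the
   dirty bitmap that the notdirty handlers above maintain -- a hedged sketch
   of a display-style scan.  fb_base, fb_size and redraw_page() are
   hypothetical; cpu_physical_memory_is_dirty() is the coarse all-flags test
   used elsewhere in this file, while real display code uses finer-grained
   dirty flags. */
#if 0 /* example only */
extern void redraw_page(ram_addr_t addr); /* hypothetical */
static void scan_framebuffer(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t a;
    for (a = fb_base; a < fb_base + fb_size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(a))
            redraw_page(a); /* guest wrote this page since the last scan */
    }
}
#endif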
2230
2231#if defined(CONFIG_SOFTMMU)
2232/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2233 so these check for a hit then pass through to the normal out-of-line
2234 phys routines. */
2235static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2236{
2237 return ldub_phys(addr);
2238}
2239
2240static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2241{
2242 return lduw_phys(addr);
2243}
2244
2245static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2246{
2247 return ldl_phys(addr);
2248}
2249
2250/* Generate a debug exception if a watchpoint has been hit.
2251 Returns the real physical address of the access. addr will be a host
d79acba4 2252 address in case of a RAM location. */
2253static target_ulong check_watchpoint(target_phys_addr_t addr)
2254{
2255 CPUState *env = cpu_single_env;
2256 target_ulong watch;
2257 target_ulong retaddr;
2258 int i;
2259
2260 retaddr = addr;
2261 for (i = 0; i < env->nb_watchpoints; i++) {
2262 watch = env->watchpoint[i].vaddr;
2263 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2264 retaddr = addr - env->watchpoint[i].addend;
2265 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2266 cpu_single_env->watchpoint_hit = i + 1;
2267 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2268 break;
2269 }
2270 }
2271 }
2272 return retaddr;
2273}
2274
2275static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2276 uint32_t val)
2277{
2278 addr = check_watchpoint(addr);
2279 stb_phys(addr, val);
2280}
2281
2282static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2283 uint32_t val)
2284{
2285 addr = check_watchpoint(addr);
2286 stw_phys(addr, val);
2287}
2288
2289static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2290 uint32_t val)
2291{
2292 addr = check_watchpoint(addr);
2293 stl_phys(addr, val);
2294}
2295
2296static CPUReadMemoryFunc *watch_mem_read[3] = {
2297 watch_mem_readb,
2298 watch_mem_readw,
2299 watch_mem_readl,
2300};
2301
2302static CPUWriteMemoryFunc *watch_mem_write[3] = {
2303 watch_mem_writeb,
2304 watch_mem_writew,
2305 watch_mem_writel,
2306};
2307#endif
2308
2309static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2310 unsigned int len)
2311{
2312 uint32_t ret;
2313 unsigned int idx;
2314
2315 idx = SUBPAGE_IDX(addr - mmio->base);
2316#if defined(DEBUG_SUBPAGE)
2317 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2318 mmio, len, addr, idx);
2319#endif
3ee89922 2320 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2321
2322 return ret;
2323}
2324
2325static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2326 uint32_t value, unsigned int len)
2327{
2328 unsigned int idx;
2329
2330 idx = SUBPAGE_IDX(addr - mmio->base);
2331#if defined(DEBUG_SUBPAGE)
2332 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2333 mmio, len, addr, idx, value);
2334#endif
3ee89922 2335 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2336}
2337
2338static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2339{
2340#if defined(DEBUG_SUBPAGE)
2341 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2342#endif
2343
2344 return subpage_readlen(opaque, addr, 0);
2345}
2346
2347static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2348 uint32_t value)
2349{
2350#if defined(DEBUG_SUBPAGE)
2351 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2352#endif
2353 subpage_writelen(opaque, addr, value, 0);
2354}
2355
2356static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2357{
2358#if defined(DEBUG_SUBPAGE)
2359 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2360#endif
2361
2362 return subpage_readlen(opaque, addr, 1);
2363}
2364
2365static void subpage_writew (void *opaque, target_phys_addr_t addr,
2366 uint32_t value)
2367{
2368#if defined(DEBUG_SUBPAGE)
2369 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2370#endif
2371 subpage_writelen(opaque, addr, value, 1);
2372}
2373
2374static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2375{
2376#if defined(DEBUG_SUBPAGE)
2377 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2378#endif
2379
2380 return subpage_readlen(opaque, addr, 2);
2381}
2382
2383static void subpage_writel (void *opaque,
2384 target_phys_addr_t addr, uint32_t value)
2385{
2386#if defined(DEBUG_SUBPAGE)
2387 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2388#endif
2389 subpage_writelen(opaque, addr, value, 2);
2390}
2391
2392static CPUReadMemoryFunc *subpage_read[] = {
2393 &subpage_readb,
2394 &subpage_readw,
2395 &subpage_readl,
2396};
2397
2398static CPUWriteMemoryFunc *subpage_write[] = {
2399 &subpage_writeb,
2400 &subpage_writew,
2401 &subpage_writel,
2402};
2403
2404static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2405 int memory)
2406{
2407 int idx, eidx;
4254fab8 2408 unsigned int i;
2409
2410 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2411 return -1;
2412 idx = SUBPAGE_IDX(start);
2413 eidx = SUBPAGE_IDX(end);
2414#if defined(DEBUG_SUBPAGE)
2415 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2416 mmio, start, end, idx, eidx, memory);
2417#endif
2418 memory >>= IO_MEM_SHIFT;
2419 for (; idx <= eidx; idx++) {
4254fab8 2420 for (i = 0; i < 4; i++) {
2421 if (io_mem_read[memory][i]) {
2422 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2423 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2424 }
2425 if (io_mem_write[memory][i]) {
2426 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2427 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2428 }
4254fab8 2429 }
2430 }
2431
2432 return 0;
2433}
2434
2435static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2436 int orig_memory)
2437{
2438 subpage_t *mmio;
2439 int subpage_memory;
2440
2441 mmio = qemu_mallocz(sizeof(subpage_t));
2442 if (mmio != NULL) {
2443 mmio->base = base;
2444 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2445#if defined(DEBUG_SUBPAGE)
2446 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2447 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2448#endif
2449 *phys = subpage_memory | IO_MEM_SUBPAGE;
2450 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2451 }
2452
2453 return mmio;
2454}
2455
2456static void io_mem_init(void)
2457{
3a7d929e 2458 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2459 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2460 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2461 io_mem_nb = 5;
2462
2463#if defined(CONFIG_SOFTMMU)
2464 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2465 watch_mem_write, NULL);
2466#endif
1ccde1cb 2467 /* alloc dirty bits array */
0a962c02 2468 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2469 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2470}
2471
 2472/* mem_read and mem_write are arrays of functions containing the
 2473   function to access byte (index 0), word (index 1) and dword (index
 2474   2). Functions can be omitted with a NULL function pointer. The
 2475   registered functions may be modified dynamically later.
 2476   If io_index is positive, the corresponding io zone is
 2477   modified. If it is zero or negative, a new io zone is allocated. The
 2478   return value can be used with cpu_register_physical_memory(). (-1) is
 2479   returned on error. */
2480int cpu_register_io_memory(int io_index,
2481 CPUReadMemoryFunc **mem_read,
2482 CPUWriteMemoryFunc **mem_write,
2483 void *opaque)
33417e70 2484{
4254fab8 2485 int i, subwidth = 0;
2486
2487 if (io_index <= 0) {
b5ff1b31 2488 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2489 return -1;
2490 io_index = io_mem_nb++;
2491 } else {
2492 if (io_index >= IO_MEM_NB_ENTRIES)
2493 return -1;
2494 }
b5ff1b31 2495
33417e70 2496     for(i = 0; i < 3; i++) {
2497 if (!mem_read[i] || !mem_write[i])
2498 subwidth = IO_MEM_SUBWIDTH;
2499 io_mem_read[io_index][i] = mem_read[i];
2500 io_mem_write[io_index][i] = mem_write[i];
2501 }
a4193c8a 2502 io_mem_opaque[io_index] = opaque;
4254fab8 2503 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2504}
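/* Illustration (not part of the original source): registering a trivial
   byte-wide MMIO device -- a hedged sketch; my_readb/my_writeb and
   my_device_init are hypothetical.  Leaving the word/long slots NULL makes
   cpu_register_io_memory() return an index tagged IO_MEM_SUBWIDTH, which
   cpu_register_physical_memory() then routes through a subpage. */
#if 0 /* example only */
static uint32_t my_readb(void *opaque, target_phys_addr_t addr)
{
    return 0xff; /* open bus */
}

static void my_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* ignore writes */
}

static CPUReadMemoryFunc *my_read[3] = { my_readb, NULL, NULL };
static CPUWriteMemoryFunc *my_write[3] = { my_writeb, NULL, NULL };

static void my_device_init(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, my_read, my_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif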
61382a50 2505
2506CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2507{
2508 return io_mem_write[io_index >> IO_MEM_SHIFT];
2509}
2510
2511CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2512{
2513 return io_mem_read[io_index >> IO_MEM_SHIFT];
2514}
2515
2516/* physical memory access (slow version, mainly for debug) */
2517#if defined(CONFIG_USER_ONLY)
5fafdf24 2518void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2519 int len, int is_write)
2520{
2521 int l, flags;
2522 target_ulong page;
53a5960a 2523 void * p;
2524
2525 while (len > 0) {
2526 page = addr & TARGET_PAGE_MASK;
2527 l = (page + TARGET_PAGE_SIZE) - addr;
2528 if (l > len)
2529 l = len;
2530 flags = page_get_flags(page);
2531 if (!(flags & PAGE_VALID))
2532 return;
2533 if (is_write) {
2534 if (!(flags & PAGE_WRITE))
2535 return;
2536 /* XXX: this code should not depend on lock_user */
2537 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2538 /* FIXME - should this return an error rather than just fail? */
2539 return;
2540 memcpy(p, buf, len);
2541 unlock_user(p, addr, len);
2542 } else {
2543 if (!(flags & PAGE_READ))
2544 return;
2545 /* XXX: this code should not depend on lock_user */
2546 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2547 /* FIXME - should this return an error rather than just fail? */
2548 return;
2549 memcpy(buf, p, len);
2550 unlock_user(p, addr, 0);
2551 }
2552 len -= l;
2553 buf += l;
2554 addr += l;
2555 }
2556}
8df1cd07 2557
13eb76e0 2558#else
5fafdf24 2559void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2560 int len, int is_write)
2561{
2562 int l, io_index;
2563 uint8_t *ptr;
2564 uint32_t val;
2565 target_phys_addr_t page;
2566 unsigned long pd;
92e873b9 2567 PhysPageDesc *p;
3b46e624 2568
2569 while (len > 0) {
2570 page = addr & TARGET_PAGE_MASK;
2571 l = (page + TARGET_PAGE_SIZE) - addr;
2572 if (l > len)
2573 l = len;
92e873b9 2574 p = phys_page_find(page >> TARGET_PAGE_BITS);
2575 if (!p) {
2576 pd = IO_MEM_UNASSIGNED;
2577 } else {
2578 pd = p->phys_offset;
2579 }
3b46e624 2580
13eb76e0 2581 if (is_write) {
3a7d929e 2582 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2583 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2584 /* XXX: could force cpu_single_env to NULL to avoid
2585 potential bugs */
13eb76e0 2586 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2587 /* 32 bit write access */
c27004ec 2588 val = ldl_p(buf);
a4193c8a 2589 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2590 l = 4;
2591 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2592 /* 16 bit write access */
c27004ec 2593 val = lduw_p(buf);
a4193c8a 2594 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2595 l = 2;
2596 } else {
1c213d19 2597 /* 8 bit write access */
c27004ec 2598 val = ldub_p(buf);
a4193c8a 2599 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2600 l = 1;
2601 }
2602 } else {
2603 unsigned long addr1;
2604 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2605 /* RAM case */
b448f2f3 2606 ptr = phys_ram_base + addr1;
13eb76e0 2607 memcpy(ptr, buf, l);
2608 if (!cpu_physical_memory_is_dirty(addr1)) {
2609 /* invalidate code */
2610 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2611 /* set dirty bit */
5fafdf24 2612 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2613 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2614 }
2615 }
2616 } else {
5fafdf24 2617 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2618 !(pd & IO_MEM_ROMD)) {
2619 /* I/O case */
2620 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2621 if (l >= 4 && ((addr & 3) == 0)) {
2622 /* 32 bit read access */
a4193c8a 2623 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2624 stl_p(buf, val);
2625 l = 4;
2626 } else if (l >= 2 && ((addr & 1) == 0)) {
2627 /* 16 bit read access */
a4193c8a 2628 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2629 stw_p(buf, val);
2630 l = 2;
2631 } else {
1c213d19 2632 /* 8 bit read access */
a4193c8a 2633 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2634 stb_p(buf, val);
2635 l = 1;
2636 }
2637 } else {
2638 /* RAM case */
5fafdf24 2639 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2640 (addr & ~TARGET_PAGE_MASK);
2641 memcpy(buf, ptr, l);
2642 }
2643 }
2644 len -= l;
2645 buf += l;
2646 addr += l;
2647 }
2648}
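/* Illustration (not part of the original source): device-DMA-style use of
   cpu_physical_memory_rw() through the read/write convenience wrappers --
   a hedged sketch; dma_roundtrip, the address and the buffer are made up. */
#if 0 /* example only */
static void dma_roundtrip(void)
{
    uint8_t dma_buf[512];
    cpu_physical_memory_read(0x8000, dma_buf, sizeof(dma_buf));
    dma_buf[0] ^= 0xff; /* the device mangles the sector */
    cpu_physical_memory_write(0x8000, dma_buf, sizeof(dma_buf));
}
#endif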
8df1cd07 2649
d0ecd2aa 2650/* used for ROM loading: can write in RAM and ROM */
5fafdf24 2651void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2652 const uint8_t *buf, int len)
2653{
2654 int l;
2655 uint8_t *ptr;
2656 target_phys_addr_t page;
2657 unsigned long pd;
2658 PhysPageDesc *p;
3b46e624 2659
2660 while (len > 0) {
2661 page = addr & TARGET_PAGE_MASK;
2662 l = (page + TARGET_PAGE_SIZE) - addr;
2663 if (l > len)
2664 l = len;
2665 p = phys_page_find(page >> TARGET_PAGE_BITS);
2666 if (!p) {
2667 pd = IO_MEM_UNASSIGNED;
2668 } else {
2669 pd = p->phys_offset;
2670 }
3b46e624 2671
d0ecd2aa 2672 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2673 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2674 !(pd & IO_MEM_ROMD)) {
2675 /* do nothing */
2676 } else {
2677 unsigned long addr1;
2678 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2679 /* ROM/RAM case */
2680 ptr = phys_ram_base + addr1;
2681 memcpy(ptr, buf, l);
2682 }
2683 len -= l;
2684 buf += l;
2685 addr += l;
2686 }
2687}
2688
2689
2690/* warning: addr must be aligned */
2691uint32_t ldl_phys(target_phys_addr_t addr)
2692{
2693 int io_index;
2694 uint8_t *ptr;
2695 uint32_t val;
2696 unsigned long pd;
2697 PhysPageDesc *p;
2698
2699 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2700 if (!p) {
2701 pd = IO_MEM_UNASSIGNED;
2702 } else {
2703 pd = p->phys_offset;
2704 }
3b46e624 2705
5fafdf24 2706 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2707 !(pd & IO_MEM_ROMD)) {
2708 /* I/O case */
2709 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2710 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2711 } else {
2712 /* RAM case */
5fafdf24 2713 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2714 (addr & ~TARGET_PAGE_MASK);
2715 val = ldl_p(ptr);
2716 }
2717 return val;
2718}
2719
2720/* warning: addr must be aligned */
2721uint64_t ldq_phys(target_phys_addr_t addr)
2722{
2723 int io_index;
2724 uint8_t *ptr;
2725 uint64_t val;
2726 unsigned long pd;
2727 PhysPageDesc *p;
2728
2729 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2730 if (!p) {
2731 pd = IO_MEM_UNASSIGNED;
2732 } else {
2733 pd = p->phys_offset;
2734 }
3b46e624 2735
2736 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2737 !(pd & IO_MEM_ROMD)) {
2738 /* I/O case */
2739 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2740#ifdef TARGET_WORDS_BIGENDIAN
2741 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2742 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2743#else
2744 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2745 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2746#endif
2747 } else {
2748 /* RAM case */
5fafdf24 2749 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2750 (addr & ~TARGET_PAGE_MASK);
2751 val = ldq_p(ptr);
2752 }
2753 return val;
2754}
2755
2756/* XXX: optimize */
2757uint32_t ldub_phys(target_phys_addr_t addr)
2758{
2759 uint8_t val;
2760 cpu_physical_memory_read(addr, &val, 1);
2761 return val;
2762}
2763
2764/* XXX: optimize */
2765uint32_t lduw_phys(target_phys_addr_t addr)
2766{
2767 uint16_t val;
2768 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2769 return tswap16(val);
2770}
2771
 2772/* warning: addr must be aligned. The ram page is not marked as dirty
2773 and the code inside is not invalidated. It is useful if the dirty
2774 bits are used to track modified PTEs */
2775void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2776{
2777 int io_index;
2778 uint8_t *ptr;
2779 unsigned long pd;
2780 PhysPageDesc *p;
2781
2782 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2783 if (!p) {
2784 pd = IO_MEM_UNASSIGNED;
2785 } else {
2786 pd = p->phys_offset;
2787 }
3b46e624 2788
3a7d929e 2789 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2790 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2791 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2792 } else {
5fafdf24 2793 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2794 (addr & ~TARGET_PAGE_MASK);
2795 stl_p(ptr, val);
2796 }
2797}
2798
2799void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2800{
2801 int io_index;
2802 uint8_t *ptr;
2803 unsigned long pd;
2804 PhysPageDesc *p;
2805
2806 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2807 if (!p) {
2808 pd = IO_MEM_UNASSIGNED;
2809 } else {
2810 pd = p->phys_offset;
2811 }
3b46e624 2812
2813 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2814 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2815#ifdef TARGET_WORDS_BIGENDIAN
2816 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2817 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2818#else
2819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2820 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2821#endif
2822 } else {
5fafdf24 2823 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2824 (addr & ~TARGET_PAGE_MASK);
2825 stq_p(ptr, val);
2826 }
2827}
2828
8df1cd07 2829/* warning: addr must be aligned */
2830void stl_phys(target_phys_addr_t addr, uint32_t val)
2831{
2832 int io_index;
2833 uint8_t *ptr;
2834 unsigned long pd;
2835 PhysPageDesc *p;
2836
2837 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2838 if (!p) {
2839 pd = IO_MEM_UNASSIGNED;
2840 } else {
2841 pd = p->phys_offset;
2842 }
3b46e624 2843
3a7d929e 2844 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2845 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2846 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2847 } else {
2848 unsigned long addr1;
2849 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2850 /* RAM case */
2851 ptr = phys_ram_base + addr1;
2852 stl_p(ptr, val);
2853 if (!cpu_physical_memory_is_dirty(addr1)) {
2854 /* invalidate code */
2855 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2856 /* set dirty bit */
2857 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2858 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2859 }
2860 }
2861}
2862
2863/* XXX: optimize */
2864void stb_phys(target_phys_addr_t addr, uint32_t val)
2865{
2866 uint8_t v = val;
2867 cpu_physical_memory_write(addr, &v, 1);
2868}
2869
2870/* XXX: optimize */
2871void stw_phys(target_phys_addr_t addr, uint32_t val)
2872{
2873 uint16_t v = tswap16(val);
2874 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2875}
2876
2877/* XXX: optimize */
2878void stq_phys(target_phys_addr_t addr, uint64_t val)
2879{
2880 val = tswap64(val);
2881 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2882}
2883
2884#endif
2885
2886/* virtual memory access for debug */
5fafdf24 2887int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2888 uint8_t *buf, int len, int is_write)
2889{
2890 int l;
2891 target_phys_addr_t phys_addr;
2892 target_ulong page;
2893
2894 while (len > 0) {
2895 page = addr & TARGET_PAGE_MASK;
2896 phys_addr = cpu_get_phys_page_debug(env, page);
2897 /* if no physical page mapped, return an error */
2898 if (phys_addr == -1)
2899 return -1;
2900 l = (page + TARGET_PAGE_SIZE) - addr;
2901 if (l > len)
2902 l = len;
5fafdf24 2903 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2904 buf, l, is_write);
2905 len -= l;
2906 buf += l;
2907 addr += l;
2908 }
2909 return 0;
2910}
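/* Illustration (not part of the original source): how a debugger stub might
   read guest-virtual memory with cpu_memory_rw_debug() -- a hedged sketch;
   debug_read_u32 is hypothetical, env is the CPU under debug and vaddr a
   guest virtual address. */
#if 0 /* example only */
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1; /* no physical page mapped at vaddr */
    *out = ldl_p(buf); /* decode in target byte order */
    return 0;
}
#endif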
2911
2912void dump_exec_info(FILE *f,
2913 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2914{
2915 int i, target_code_size, max_target_code_size;
2916 int direct_jmp_count, direct_jmp2_count, cross_page;
2917 TranslationBlock *tb;
3b46e624 2918
2919 target_code_size = 0;
2920 max_target_code_size = 0;
2921 cross_page = 0;
2922 direct_jmp_count = 0;
2923 direct_jmp2_count = 0;
2924 for(i = 0; i < nb_tbs; i++) {
2925 tb = &tbs[i];
2926 target_code_size += tb->size;
2927 if (tb->size > max_target_code_size)
2928 max_target_code_size = tb->size;
2929 if (tb->page_addr[1] != -1)
2930 cross_page++;
2931 if (tb->tb_next_offset[0] != 0xffff) {
2932 direct_jmp_count++;
2933 if (tb->tb_next_offset[1] != 0xffff) {
2934 direct_jmp2_count++;
2935 }
2936 }
2937 }
2938 /* XXX: avoid using doubles ? */
57fec1fe 2939 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2940 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2941 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2942 nb_tbs ? target_code_size / nb_tbs : 0,
2943 max_target_code_size);
5fafdf24 2944 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2945 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2946 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2947 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2948 cross_page,
2949 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2950 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2951 direct_jmp_count,
2952 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2953 direct_jmp2_count,
2954 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2955 cpu_fprintf(f, "\nStatistics:\n");
2956 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2957 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2958 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2959#ifdef CONFIG_PROFILER
2960 {
2961 int64_t tot;
2962 tot = dyngen_interm_time + dyngen_code_time;
2963 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2964 tot, tot / 2.4e9);
2965 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2966 dyngen_tb_count,
2967 dyngen_tb_count1 - dyngen_tb_count,
2968 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2969 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2970 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2971 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2972 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2973 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2974 dyngen_tb_count ?
2975 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2976 cpu_fprintf(f, "cycles/op %0.1f\n",
2977 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2978 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2979 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2980 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2981 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2982 if (tot == 0)
2983 tot = 1;
2984 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2985 (double)dyngen_interm_time / tot * 100.0);
2986 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2987 (double)dyngen_code_time / tot * 100.0);
2988 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2989 dyngen_restore_count);
2990 cpu_fprintf(f, " avg cycles %0.1f\n",
2991 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
2992 {
2993 extern void dump_op_count(void);
2994 dump_op_count();
2995 }
2996 }
2997#endif
2998}
2999
5fafdf24 3000#if !defined(CONFIG_USER_ONLY)
3001
3002#define MMUSUFFIX _cmmu
3003#define GETPC() NULL
3004#define env cpu_single_env
b769d8fe 3005#define SOFTMMU_CODE_ACCESS
3006
3007#define SHIFT 0
3008#include "softmmu_template.h"
3009
3010#define SHIFT 1
3011#include "softmmu_template.h"
3012
3013#define SHIFT 2
3014#include "softmmu_template.h"
3015
3016#define SHIFT 3
3017#include "softmmu_template.h"
3018
3019#undef env
3020
3021#endif