git.proxmox.com Git - qemu.git/blame - exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
40#endif
54936004 41
fd6ce8f6 42//#define DEBUG_TB_INVALIDATE
66e85a21 43//#define DEBUG_FLUSH
9fa3e853 44//#define DEBUG_TLB
67d3b957 45//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
46
47/* make various TB consistency checks */
5fafdf24
TS
48//#define DEBUG_TB_CHECK
49//#define DEBUG_TLB_CHECK
fd6ce8f6 50
1196be37 51//#define DEBUG_IOPORT
db7b5426 52//#define DEBUG_SUBPAGE
1196be37 53
99773bd4
PB
54#if !defined(CONFIG_USER_ONLY)
55/* TB consistency checks only implemented for usermode emulation. */
56#undef DEBUG_TB_CHECK
57#endif
58
fd6ce8f6 59/* threshold to flush the translated code buffer */
d07bde88 60#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
fd6ce8f6 61
9fa3e853
FB
62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
108c49b8
FB
67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
fd6ce8f6 81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 83int nb_tbs;
eb51d102
FB
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 86
b8076a74 87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
88uint8_t *code_gen_ptr;
89
9fa3e853
FB
90int phys_ram_size;
91int phys_ram_fd;
92uint8_t *phys_ram_base;
1ccde1cb 93uint8_t *phys_ram_dirty;
e9a1ab19 94static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 95
6a00d601
FB
96CPUState *first_cpu;
97/* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
5fafdf24 99CPUState *cpu_single_env;
6a00d601 100
54936004 101typedef struct PageDesc {
92e873b9 102 /* list of TBs intersecting this ram page */
fd6ce8f6 103 TranslationBlock *first_tb;
9fa3e853
FB
104 /* in order to optimize self modifying code, we count the number
105 of writes to a given page and switch to a code bitmap past a threshold */
106 unsigned int code_write_count;
107 uint8_t *code_bitmap;
108#if defined(CONFIG_USER_ONLY)
109 unsigned long flags;
110#endif
54936004
FB
111} PageDesc;
112
92e873b9
FB
113typedef struct PhysPageDesc {
114 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 115 uint32_t phys_offset;
92e873b9
FB
116} PhysPageDesc;
117
54936004 118#define L2_BITS 10
bedb69ea
JM
119#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120/* XXX: this is a temporary hack for alpha target.
121 * In the future, this is to be replaced by a multi-level table
122 * to actually be able to handle the complete 64-bit address space.
123 */
124#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125#else
54936004 126#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 127#endif
54936004
FB
128
129#define L1_SIZE (1 << L1_BITS)
130#define L2_SIZE (1 << L2_BITS)
131
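/* Illustrative sketch (not part of the original file): assuming a 4 KiB
   target page (TARGET_PAGE_BITS == 12) and L2_BITS == 10, L1_BITS is 10 and
   a 32-bit address splits into 10 bits of L1 index, 10 bits of L2 index and
   a 12-bit page offset.  page_find_alloc()/page_find() below perform:

       index  = addr >> TARGET_PAGE_BITS;
       l1_idx = index >> L2_BITS;          /* selects a PageDesc[L2_SIZE] chunk */
       l2_idx = index & (L2_SIZE - 1);     /* selects the entry inside it */
*/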
33417e70 132static void io_mem_init(void);
fd6ce8f6 133
83fb7adf
FB
134unsigned long qemu_real_host_page_size;
135unsigned long qemu_host_page_bits;
136unsigned long qemu_host_page_size;
137unsigned long qemu_host_page_mask;
54936004 138
92e873b9 139/* XXX: for system emulation, it could just be an array */
54936004 140static PageDesc *l1_map[L1_SIZE];
0a962c02 141PhysPageDesc **l1_phys_map;
54936004 142
33417e70 143/* io memory support */
33417e70
FB
144CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 146void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 147static int io_mem_nb;
6658ffb8
PB
148#if defined(CONFIG_SOFTMMU)
149static int io_mem_watch;
150#endif
33417e70 151
34865134
FB
152/* log support */
153char *logfilename = "/tmp/qemu.log";
154FILE *logfile;
155int loglevel;
e735b91c 156static int log_append = 0;
34865134 157
e3db7226
FB
158/* statistics */
159static int tlb_flush_count;
160static int tb_flush_count;
161static int tb_phys_invalidate_count;
162
db7b5426
BS
163#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164typedef struct subpage_t {
165 target_phys_addr_t base;
3ee89922
BS
166 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
169} subpage_t;
170
b346ff46 171static void page_init(void)
54936004 172{
83fb7adf 173 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 174 TARGET_PAGE_SIZE */
67b915a5 175#ifdef _WIN32
d5a8f07c
FB
176 {
177 SYSTEM_INFO system_info;
178 DWORD old_protect;
3b46e624 179
d5a8f07c
FB
180 GetSystemInfo(&system_info);
181 qemu_real_host_page_size = system_info.dwPageSize;
3b46e624 182
d5a8f07c
FB
183 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184 PAGE_EXECUTE_READWRITE, &old_protect);
185 }
67b915a5 186#else
83fb7adf 187 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
188 {
189 unsigned long start, end;
190
191 start = (unsigned long)code_gen_buffer;
192 start &= ~(qemu_real_host_page_size - 1);
3b46e624 193
d5a8f07c
FB
194 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
3b46e624 197
5fafdf24 198 mprotect((void *)start, end - start,
d5a8f07c
FB
199 PROT_READ | PROT_WRITE | PROT_EXEC);
200 }
67b915a5 201#endif
d5a8f07c 202
83fb7adf
FB
203 if (qemu_host_page_size == 0)
204 qemu_host_page_size = qemu_real_host_page_size;
205 if (qemu_host_page_size < TARGET_PAGE_SIZE)
206 qemu_host_page_size = TARGET_PAGE_SIZE;
207 qemu_host_page_bits = 0;
208 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209 qemu_host_page_bits++;
210 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
211 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
213
214#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
215 {
216 long long startaddr, endaddr;
217 FILE *f;
218 int n;
219
220 f = fopen("/proc/self/maps", "r");
221 if (f) {
222 do {
223 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224 if (n == 2) {
225 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226 TARGET_PAGE_ALIGN(endaddr),
227 PAGE_RESERVED);
228 }
229 } while (!feof(f));
230 fclose(f);
231 }
232 }
233#endif
54936004
FB
234}
235
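/* Worked example (not part of the original file): on a host with 4 KiB pages,
   page_init() above leaves qemu_host_page_size == 4096,
   qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfffUL. */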
fd6ce8f6 236static inline PageDesc *page_find_alloc(unsigned int index)
54936004 237{
54936004
FB
238 PageDesc **lp, *p;
239
54936004
FB
240 lp = &l1_map[index >> L2_BITS];
241 p = *lp;
242 if (!p) {
243 /* allocate if not found */
59817ccb 244 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 245 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
246 *lp = p;
247 }
248 return p + (index & (L2_SIZE - 1));
249}
250
fd6ce8f6 251static inline PageDesc *page_find(unsigned int index)
54936004 252{
54936004
FB
253 PageDesc *p;
254
54936004
FB
255 p = l1_map[index >> L2_BITS];
256 if (!p)
257 return 0;
fd6ce8f6
FB
258 return p + (index & (L2_SIZE - 1));
259}
260
108c49b8 261static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 262{
108c49b8 263 void **lp, **p;
e3f4e2a4 264 PhysPageDesc *pd;
92e873b9 265
108c49b8
FB
266 p = (void **)l1_phys_map;
267#if TARGET_PHYS_ADDR_SPACE_BITS > 32
268
269#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271#endif
272 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
108c49b8
FB
276 if (!alloc)
277 return NULL;
278 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279 memset(p, 0, sizeof(void *) * L1_SIZE);
280 *lp = p;
281 }
282#endif
283 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
284 pd = *lp;
285 if (!pd) {
286 int i;
108c49b8
FB
287 /* allocate if not found */
288 if (!alloc)
289 return NULL;
e3f4e2a4
PB
290 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291 *lp = pd;
292 for (i = 0; i < L2_SIZE; i++)
293 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 294 }
e3f4e2a4 295 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
296}
297
108c49b8 298static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 299{
108c49b8 300 return phys_page_find_alloc(index, 0);
92e873b9
FB
301}
302
9fa3e853 303#if !defined(CONFIG_USER_ONLY)
6a00d601 304static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 305static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 306 target_ulong vaddr);
9fa3e853 307#endif
fd6ce8f6 308
6a00d601 309void cpu_exec_init(CPUState *env)
fd6ce8f6 310{
6a00d601
FB
311 CPUState **penv;
312 int cpu_index;
313
fd6ce8f6 314 if (!code_gen_ptr) {
57fec1fe 315 cpu_gen_init();
fd6ce8f6 316 code_gen_ptr = code_gen_buffer;
b346ff46 317 page_init();
33417e70 318 io_mem_init();
fd6ce8f6 319 }
6a00d601
FB
320 env->next_cpu = NULL;
321 penv = &first_cpu;
322 cpu_index = 0;
323 while (*penv != NULL) {
324 penv = (CPUState **)&(*penv)->next_cpu;
325 cpu_index++;
326 }
327 env->cpu_index = cpu_index;
6658ffb8 328 env->nb_watchpoints = 0;
6a00d601 329 *penv = env;
fd6ce8f6
FB
330}
331
9fa3e853
FB
332static inline void invalidate_page_bitmap(PageDesc *p)
333{
334 if (p->code_bitmap) {
59817ccb 335 qemu_free(p->code_bitmap);
9fa3e853
FB
336 p->code_bitmap = NULL;
337 }
338 p->code_write_count = 0;
339}
340
fd6ce8f6
FB
341/* set to NULL all the 'first_tb' fields in all PageDescs */
342static void page_flush_tb(void)
343{
344 int i, j;
345 PageDesc *p;
346
347 for(i = 0; i < L1_SIZE; i++) {
348 p = l1_map[i];
349 if (p) {
9fa3e853
FB
350 for(j = 0; j < L2_SIZE; j++) {
351 p->first_tb = NULL;
352 invalidate_page_bitmap(p);
353 p++;
354 }
fd6ce8f6
FB
355 }
356 }
357}
358
359/* flush all the translation blocks */
d4e8164f 360/* XXX: tb_flush is currently not thread safe */
6a00d601 361void tb_flush(CPUState *env1)
fd6ce8f6 362{
6a00d601 363 CPUState *env;
0124311e 364#if defined(DEBUG_FLUSH)
ab3d1727
BS
365 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
366 (unsigned long)(code_gen_ptr - code_gen_buffer),
367 nb_tbs, nb_tbs > 0 ?
368 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6
FB
369#endif
370 nb_tbs = 0;
3b46e624 371
6a00d601
FB
372 for(env = first_cpu; env != NULL; env = env->next_cpu) {
373 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
374 }
9fa3e853 375
8a8a608f 376 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 377 page_flush_tb();
9fa3e853 378
fd6ce8f6 379 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
380 /* XXX: flush processor icache at this point if cache flush is
381 expensive */
e3db7226 382 tb_flush_count++;
fd6ce8f6
FB
383}
384
385#ifdef DEBUG_TB_CHECK
386
bc98a7ef 387static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
388{
389 TranslationBlock *tb;
390 int i;
391 address &= TARGET_PAGE_MASK;
99773bd4
PB
392 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
393 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
394 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
395 address >= tb->pc + tb->size)) {
396 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 397 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
398 }
399 }
400 }
401}
402
403/* verify that all the pages have correct rights for code */
404static void tb_page_check(void)
405{
406 TranslationBlock *tb;
407 int i, flags1, flags2;
3b46e624 408
99773bd4
PB
409 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
410 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
411 flags1 = page_get_flags(tb->pc);
412 flags2 = page_get_flags(tb->pc + tb->size - 1);
413 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
414 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 415 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
416 }
417 }
418 }
419}
420
d4e8164f
FB
421void tb_jmp_check(TranslationBlock *tb)
422{
423 TranslationBlock *tb1;
424 unsigned int n1;
425
426 /* suppress any remaining jumps to this TB */
427 tb1 = tb->jmp_first;
428 for(;;) {
429 n1 = (long)tb1 & 3;
430 tb1 = (TranslationBlock *)((long)tb1 & ~3);
431 if (n1 == 2)
432 break;
433 tb1 = tb1->jmp_next[n1];
434 }
435 /* check end of list */
436 if (tb1 != tb) {
437 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
438 }
439}
440
fd6ce8f6
FB
441#endif
442
443/* invalidate one TB */
444static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
445 int next_offset)
446{
447 TranslationBlock *tb1;
448 for(;;) {
449 tb1 = *ptb;
450 if (tb1 == tb) {
451 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
452 break;
453 }
454 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
455 }
456}
457
9fa3e853
FB
458static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
459{
460 TranslationBlock *tb1;
461 unsigned int n1;
462
463 for(;;) {
464 tb1 = *ptb;
465 n1 = (long)tb1 & 3;
466 tb1 = (TranslationBlock *)((long)tb1 & ~3);
467 if (tb1 == tb) {
468 *ptb = tb1->page_next[n1];
469 break;
470 }
471 ptb = &tb1->page_next[n1];
472 }
473}
474
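/* Illustrative sketch (not part of the original file): page_next[] and the
   jmp_* fields hold tagged pointers - the low 2 bits of the TranslationBlock
   pointer encode which slot (0 or 1) the link came from, and the value 2
   marks the list head.  A hypothetical helper that decodes such a pointer: */
#if 0 /* example only */
static inline TranslationBlock *tb_untag(TranslationBlock *tagged, unsigned int *n)
{
    *n = (long)tagged & 3;                          /* slot index, or 2 for the head */
    return (TranslationBlock *)((long)tagged & ~3); /* the real TB pointer */
}
#endif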
d4e8164f
FB
475static inline void tb_jmp_remove(TranslationBlock *tb, int n)
476{
477 TranslationBlock *tb1, **ptb;
478 unsigned int n1;
479
480 ptb = &tb->jmp_next[n];
481 tb1 = *ptb;
482 if (tb1) {
483 /* find tb(n) in circular list */
484 for(;;) {
485 tb1 = *ptb;
486 n1 = (long)tb1 & 3;
487 tb1 = (TranslationBlock *)((long)tb1 & ~3);
488 if (n1 == n && tb1 == tb)
489 break;
490 if (n1 == 2) {
491 ptb = &tb1->jmp_first;
492 } else {
493 ptb = &tb1->jmp_next[n1];
494 }
495 }
496 /* now we can suppress tb(n) from the list */
497 *ptb = tb->jmp_next[n];
498
499 tb->jmp_next[n] = NULL;
500 }
501}
502
503/* reset the jump entry 'n' of a TB so that it is not chained to
504 another TB */
505static inline void tb_reset_jump(TranslationBlock *tb, int n)
506{
507 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
508}
509
8a40a180 510static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 511{
6a00d601 512 CPUState *env;
8a40a180 513 PageDesc *p;
d4e8164f 514 unsigned int h, n1;
8a40a180
FB
515 target_ulong phys_pc;
516 TranslationBlock *tb1, *tb2;
3b46e624 517
8a40a180
FB
518 /* remove the TB from the hash list */
519 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
520 h = tb_phys_hash_func(phys_pc);
5fafdf24 521 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
522 offsetof(TranslationBlock, phys_hash_next));
523
524 /* remove the TB from the page list */
525 if (tb->page_addr[0] != page_addr) {
526 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
527 tb_page_remove(&p->first_tb, tb);
528 invalidate_page_bitmap(p);
529 }
530 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
531 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
532 tb_page_remove(&p->first_tb, tb);
533 invalidate_page_bitmap(p);
534 }
535
36bdbe54 536 tb_invalidated_flag = 1;
59817ccb 537
fd6ce8f6 538 /* remove the TB from the hash list */
8a40a180 539 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
540 for(env = first_cpu; env != NULL; env = env->next_cpu) {
541 if (env->tb_jmp_cache[h] == tb)
542 env->tb_jmp_cache[h] = NULL;
543 }
d4e8164f
FB
544
545 /* suppress this TB from the two jump lists */
546 tb_jmp_remove(tb, 0);
547 tb_jmp_remove(tb, 1);
548
549 /* suppress any remaining jumps to this TB */
550 tb1 = tb->jmp_first;
551 for(;;) {
552 n1 = (long)tb1 & 3;
553 if (n1 == 2)
554 break;
555 tb1 = (TranslationBlock *)((long)tb1 & ~3);
556 tb2 = tb1->jmp_next[n1];
557 tb_reset_jump(tb1, n1);
558 tb1->jmp_next[n1] = NULL;
559 tb1 = tb2;
560 }
561 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 562
e3db7226 563 tb_phys_invalidate_count++;
9fa3e853
FB
564}
565
566static inline void set_bits(uint8_t *tab, int start, int len)
567{
568 int end, mask, end1;
569
570 end = start + len;
571 tab += start >> 3;
572 mask = 0xff << (start & 7);
573 if ((start & ~7) == (end & ~7)) {
574 if (start < end) {
575 mask &= ~(0xff << (end & 7));
576 *tab |= mask;
577 }
578 } else {
579 *tab++ |= mask;
580 start = (start + 8) & ~7;
581 end1 = end & ~7;
582 while (start < end1) {
583 *tab++ = 0xff;
584 start += 8;
585 }
586 if (start < end) {
587 mask = ~(0xff << (end & 7));
588 *tab |= mask;
589 }
590 }
591}
592
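/* Worked example (not part of the original file): set_bits(tab, 10, 5) marks
   bits 10..14, i.e. tab[1] |= 0x7c.  build_page_bitmap() below uses it to
   record which bytes of a guest page are covered by translated code. */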
593static void build_page_bitmap(PageDesc *p)
594{
595 int n, tb_start, tb_end;
596 TranslationBlock *tb;
3b46e624 597
59817ccb 598 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
599 if (!p->code_bitmap)
600 return;
601 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
602
603 tb = p->first_tb;
604 while (tb != NULL) {
605 n = (long)tb & 3;
606 tb = (TranslationBlock *)((long)tb & ~3);
607 /* NOTE: this is subtle as a TB may span two physical pages */
608 if (n == 0) {
609 /* NOTE: tb_end may be after the end of the page, but
610 it is not a problem */
611 tb_start = tb->pc & ~TARGET_PAGE_MASK;
612 tb_end = tb_start + tb->size;
613 if (tb_end > TARGET_PAGE_SIZE)
614 tb_end = TARGET_PAGE_SIZE;
615 } else {
616 tb_start = 0;
617 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
618 }
619 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
620 tb = tb->page_next[n];
621 }
622}
623
d720b93d
FB
624#ifdef TARGET_HAS_PRECISE_SMC
625
5fafdf24 626static void tb_gen_code(CPUState *env,
d720b93d
FB
627 target_ulong pc, target_ulong cs_base, int flags,
628 int cflags)
629{
630 TranslationBlock *tb;
631 uint8_t *tc_ptr;
632 target_ulong phys_pc, phys_page2, virt_page2;
633 int code_gen_size;
634
c27004ec
FB
635 phys_pc = get_phys_addr_code(env, pc);
636 tb = tb_alloc(pc);
d720b93d
FB
637 if (!tb) {
638 /* flush must be done */
639 tb_flush(env);
640 /* cannot fail at this point */
c27004ec 641 tb = tb_alloc(pc);
d720b93d
FB
642 }
643 tc_ptr = code_gen_ptr;
644 tb->tc_ptr = tc_ptr;
645 tb->cs_base = cs_base;
646 tb->flags = flags;
647 tb->cflags = cflags;
d07bde88 648 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 649 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 650
d720b93d 651 /* check next page if needed */
c27004ec 652 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 653 phys_page2 = -1;
c27004ec 654 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
655 phys_page2 = get_phys_addr_code(env, virt_page2);
656 }
657 tb_link_phys(tb, phys_pc, phys_page2);
658}
659#endif
3b46e624 660
9fa3e853
FB
661/* invalidate all TBs which intersect with the target physical page
662 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
663 the same physical page. 'is_cpu_write_access' should be true if called
664 from a real cpu write access: the virtual CPU will exit the current
665 TB if code is modified inside this TB. */
5fafdf24 666void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
d720b93d
FB
667 int is_cpu_write_access)
668{
669 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 670 CPUState *env = cpu_single_env;
9fa3e853 671 PageDesc *p;
ea1c1802 672 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 673 target_ulong tb_start, tb_end;
d720b93d 674 target_ulong current_pc, current_cs_base;
9fa3e853
FB
675
676 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 677 if (!p)
9fa3e853 678 return;
5fafdf24 679 if (!p->code_bitmap &&
d720b93d
FB
680 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
681 is_cpu_write_access) {
9fa3e853
FB
682 /* build code bitmap */
683 build_page_bitmap(p);
684 }
685
686 /* we remove all the TBs in the range [start, end[ */
687 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
688 current_tb_not_found = is_cpu_write_access;
689 current_tb_modified = 0;
690 current_tb = NULL; /* avoid warning */
691 current_pc = 0; /* avoid warning */
692 current_cs_base = 0; /* avoid warning */
693 current_flags = 0; /* avoid warning */
9fa3e853
FB
694 tb = p->first_tb;
695 while (tb != NULL) {
696 n = (long)tb & 3;
697 tb = (TranslationBlock *)((long)tb & ~3);
698 tb_next = tb->page_next[n];
699 /* NOTE: this is subtle as a TB may span two physical pages */
700 if (n == 0) {
701 /* NOTE: tb_end may be after the end of the page, but
702 it is not a problem */
703 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
704 tb_end = tb_start + tb->size;
705 } else {
706 tb_start = tb->page_addr[1];
707 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
708 }
709 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
710#ifdef TARGET_HAS_PRECISE_SMC
711 if (current_tb_not_found) {
712 current_tb_not_found = 0;
713 current_tb = NULL;
714 if (env->mem_write_pc) {
715 /* now we have a real cpu fault */
716 current_tb = tb_find_pc(env->mem_write_pc);
717 }
718 }
719 if (current_tb == tb &&
720 !(current_tb->cflags & CF_SINGLE_INSN)) {
721 /* If we are modifying the current TB, we must stop
722 its execution. We could be more precise by checking
723 that the modification is after the current PC, but it
724 would require a specialized function to partially
725 restore the CPU state */
3b46e624 726
d720b93d 727 current_tb_modified = 1;
5fafdf24 728 cpu_restore_state(current_tb, env,
d720b93d
FB
729 env->mem_write_pc, NULL);
730#if defined(TARGET_I386)
731 current_flags = env->hflags;
732 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
733 current_cs_base = (target_ulong)env->segs[R_CS].base;
734 current_pc = current_cs_base + env->eip;
735#else
736#error unsupported CPU
737#endif
738 }
739#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
740 /* we need to do that to handle the case where a signal
741 occurs while doing tb_phys_invalidate() */
742 saved_tb = NULL;
743 if (env) {
744 saved_tb = env->current_tb;
745 env->current_tb = NULL;
746 }
9fa3e853 747 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
748 if (env) {
749 env->current_tb = saved_tb;
750 if (env->interrupt_request && env->current_tb)
751 cpu_interrupt(env, env->interrupt_request);
752 }
9fa3e853
FB
753 }
754 tb = tb_next;
755 }
756#if !defined(CONFIG_USER_ONLY)
757 /* if no code remaining, no need to continue to use slow writes */
758 if (!p->first_tb) {
759 invalidate_page_bitmap(p);
d720b93d
FB
760 if (is_cpu_write_access) {
761 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
762 }
763 }
764#endif
765#ifdef TARGET_HAS_PRECISE_SMC
766 if (current_tb_modified) {
767 /* we generate a block containing just the instruction
768 modifying the memory. It will ensure that it cannot modify
769 itself */
ea1c1802 770 env->current_tb = NULL;
5fafdf24 771 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
772 CF_SINGLE_INSN);
773 cpu_resume_from_signal(env, NULL);
9fa3e853 774 }
fd6ce8f6 775#endif
9fa3e853 776}
fd6ce8f6 777
9fa3e853 778/* len must be <= 8 and start must be a multiple of len */
d720b93d 779static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
780{
781 PageDesc *p;
782 int offset, b;
59817ccb 783#if 0
a4193c8a
FB
784 if (1) {
785 if (loglevel) {
5fafdf24
TS
786 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
787 cpu_single_env->mem_write_vaddr, len,
788 cpu_single_env->eip,
a4193c8a
FB
789 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
790 }
59817ccb
FB
791 }
792#endif
9fa3e853 793 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 794 if (!p)
9fa3e853
FB
795 return;
796 if (p->code_bitmap) {
797 offset = start & ~TARGET_PAGE_MASK;
798 b = p->code_bitmap[offset >> 3] >> (offset & 7);
799 if (b & ((1 << len) - 1))
800 goto do_invalidate;
801 } else {
802 do_invalidate:
d720b93d 803 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
804 }
805}
806
9fa3e853 807#if !defined(CONFIG_SOFTMMU)
5fafdf24 808static void tb_invalidate_phys_page(target_ulong addr,
d720b93d 809 unsigned long pc, void *puc)
9fa3e853 810{
d720b93d
FB
811 int n, current_flags, current_tb_modified;
812 target_ulong current_pc, current_cs_base;
9fa3e853 813 PageDesc *p;
d720b93d
FB
814 TranslationBlock *tb, *current_tb;
815#ifdef TARGET_HAS_PRECISE_SMC
816 CPUState *env = cpu_single_env;
817#endif
9fa3e853
FB
818
819 addr &= TARGET_PAGE_MASK;
820 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 821 if (!p)
9fa3e853
FB
822 return;
823 tb = p->first_tb;
d720b93d
FB
824 current_tb_modified = 0;
825 current_tb = NULL;
826 current_pc = 0; /* avoid warning */
827 current_cs_base = 0; /* avoid warning */
828 current_flags = 0; /* avoid warning */
829#ifdef TARGET_HAS_PRECISE_SMC
830 if (tb && pc != 0) {
831 current_tb = tb_find_pc(pc);
832 }
833#endif
9fa3e853
FB
834 while (tb != NULL) {
835 n = (long)tb & 3;
836 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
837#ifdef TARGET_HAS_PRECISE_SMC
838 if (current_tb == tb &&
839 !(current_tb->cflags & CF_SINGLE_INSN)) {
840 /* If we are modifying the current TB, we must stop
841 its execution. We could be more precise by checking
842 that the modification is after the current PC, but it
843 would require a specialized function to partially
844 restore the CPU state */
3b46e624 845
d720b93d
FB
846 current_tb_modified = 1;
847 cpu_restore_state(current_tb, env, pc, puc);
848#if defined(TARGET_I386)
849 current_flags = env->hflags;
850 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
851 current_cs_base = (target_ulong)env->segs[R_CS].base;
852 current_pc = current_cs_base + env->eip;
853#else
854#error unsupported CPU
855#endif
856 }
857#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
858 tb_phys_invalidate(tb, addr);
859 tb = tb->page_next[n];
860 }
fd6ce8f6 861 p->first_tb = NULL;
d720b93d
FB
862#ifdef TARGET_HAS_PRECISE_SMC
863 if (current_tb_modified) {
864 /* we generate a block containing just the instruction
865 modifying the memory. It will ensure that it cannot modify
866 itself */
ea1c1802 867 env->current_tb = NULL;
5fafdf24 868 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
869 CF_SINGLE_INSN);
870 cpu_resume_from_signal(env, puc);
871 }
872#endif
fd6ce8f6 873}
9fa3e853 874#endif
fd6ce8f6
FB
875
876/* add the tb in the target page and protect it if necessary */
5fafdf24 877static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 878 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
879{
880 PageDesc *p;
9fa3e853
FB
881 TranslationBlock *last_first_tb;
882
883 tb->page_addr[n] = page_addr;
3a7d929e 884 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
885 tb->page_next[n] = p->first_tb;
886 last_first_tb = p->first_tb;
887 p->first_tb = (TranslationBlock *)((long)tb | n);
888 invalidate_page_bitmap(p);
fd6ce8f6 889
107db443 890#if defined(TARGET_HAS_SMC) || 1
d720b93d 891
9fa3e853 892#if defined(CONFIG_USER_ONLY)
fd6ce8f6 893 if (p->flags & PAGE_WRITE) {
53a5960a
PB
894 target_ulong addr;
895 PageDesc *p2;
9fa3e853
FB
896 int prot;
897
fd6ce8f6
FB
898 /* force the host page as non writable (writes will have a
899 page fault + mprotect overhead) */
53a5960a 900 page_addr &= qemu_host_page_mask;
fd6ce8f6 901 prot = 0;
53a5960a
PB
902 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
903 addr += TARGET_PAGE_SIZE) {
904
905 p2 = page_find (addr >> TARGET_PAGE_BITS);
906 if (!p2)
907 continue;
908 prot |= p2->flags;
909 p2->flags &= ~PAGE_WRITE;
910 page_get_flags(addr);
911 }
5fafdf24 912 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
913 (prot & PAGE_BITS) & ~PAGE_WRITE);
914#ifdef DEBUG_TB_INVALIDATE
ab3d1727 915 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 916 page_addr);
fd6ce8f6 917#endif
fd6ce8f6 918 }
9fa3e853
FB
919#else
920 /* if some code is already present, then the pages are already
921 protected. So we handle the case where only the first TB is
922 allocated in a physical page */
923 if (!last_first_tb) {
6a00d601 924 tlb_protect_code(page_addr);
9fa3e853
FB
925 }
926#endif
d720b93d
FB
927
928#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
929}
930
931/* Allocate a new translation block. Flush the translation buffer if
932 too many translation blocks or too much generated code. */
c27004ec 933TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
934{
935 TranslationBlock *tb;
fd6ce8f6 936
5fafdf24 937 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
fd6ce8f6 938 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 939 return NULL;
fd6ce8f6
FB
940 tb = &tbs[nb_tbs++];
941 tb->pc = pc;
b448f2f3 942 tb->cflags = 0;
d4e8164f
FB
943 return tb;
944}
945
9fa3e853
FB
946/* add a new TB and link it to the physical page tables. phys_page2 is
947 (-1) to indicate that only one page contains the TB. */
5fafdf24 948void tb_link_phys(TranslationBlock *tb,
9fa3e853 949 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 950{
9fa3e853
FB
951 unsigned int h;
952 TranslationBlock **ptb;
953
954 /* add in the physical hash table */
955 h = tb_phys_hash_func(phys_pc);
956 ptb = &tb_phys_hash[h];
957 tb->phys_hash_next = *ptb;
958 *ptb = tb;
fd6ce8f6
FB
959
960 /* add in the page list */
9fa3e853
FB
961 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
962 if (phys_page2 != -1)
963 tb_alloc_page(tb, 1, phys_page2);
964 else
965 tb->page_addr[1] = -1;
9fa3e853 966
d4e8164f
FB
967 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
968 tb->jmp_next[0] = NULL;
969 tb->jmp_next[1] = NULL;
970
971 /* init original jump addresses */
972 if (tb->tb_next_offset[0] != 0xffff)
973 tb_reset_jump(tb, 0);
974 if (tb->tb_next_offset[1] != 0xffff)
975 tb_reset_jump(tb, 1);
8a40a180
FB
976
977#ifdef DEBUG_TB_CHECK
978 tb_page_check();
979#endif
fd6ce8f6
FB
980}
981
9fa3e853
FB
982/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
983 tb[1].tc_ptr. Return NULL if not found */
984TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 985{
9fa3e853
FB
986 int m_min, m_max, m;
987 unsigned long v;
988 TranslationBlock *tb;
a513fe19
FB
989
990 if (nb_tbs <= 0)
991 return NULL;
992 if (tc_ptr < (unsigned long)code_gen_buffer ||
993 tc_ptr >= (unsigned long)code_gen_ptr)
994 return NULL;
995 /* binary search (cf Knuth) */
996 m_min = 0;
997 m_max = nb_tbs - 1;
998 while (m_min <= m_max) {
999 m = (m_min + m_max) >> 1;
1000 tb = &tbs[m];
1001 v = (unsigned long)tb->tc_ptr;
1002 if (v == tc_ptr)
1003 return tb;
1004 else if (tc_ptr < v) {
1005 m_max = m - 1;
1006 } else {
1007 m_min = m + 1;
1008 }
5fafdf24 1009 }
a513fe19
FB
1010 return &tbs[m_max];
1011}
7501267e 1012
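/* Note (not part of the original file): the binary search above relies on
   tbs[] being ordered by tc_ptr, which holds because code_gen_ptr only grows
   between flushes, so each new TB's code starts after the previous one's.
   tb_invalidate_phys_page_range() above uses tb_find_pc() to map a host PC
   from a write fault back to the TB being executed. */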
ea041c0e
FB
1013static void tb_reset_jump_recursive(TranslationBlock *tb);
1014
1015static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1016{
1017 TranslationBlock *tb1, *tb_next, **ptb;
1018 unsigned int n1;
1019
1020 tb1 = tb->jmp_next[n];
1021 if (tb1 != NULL) {
1022 /* find head of list */
1023 for(;;) {
1024 n1 = (long)tb1 & 3;
1025 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1026 if (n1 == 2)
1027 break;
1028 tb1 = tb1->jmp_next[n1];
1029 }
1030 /* we are now sure that tb jumps to tb1 */
1031 tb_next = tb1;
1032
1033 /* remove tb from the jmp_first list */
1034 ptb = &tb_next->jmp_first;
1035 for(;;) {
1036 tb1 = *ptb;
1037 n1 = (long)tb1 & 3;
1038 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1039 if (n1 == n && tb1 == tb)
1040 break;
1041 ptb = &tb1->jmp_next[n1];
1042 }
1043 *ptb = tb->jmp_next[n];
1044 tb->jmp_next[n] = NULL;
3b46e624 1045
ea041c0e
FB
1046 /* suppress the jump to next tb in generated code */
1047 tb_reset_jump(tb, n);
1048
0124311e 1049 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1050 tb_reset_jump_recursive(tb_next);
1051 }
1052}
1053
1054static void tb_reset_jump_recursive(TranslationBlock *tb)
1055{
1056 tb_reset_jump_recursive2(tb, 0);
1057 tb_reset_jump_recursive2(tb, 1);
1058}
1059
1fddef4b 1060#if defined(TARGET_HAS_ICE)
d720b93d
FB
1061static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1062{
9b3c35e0
JM
1063 target_phys_addr_t addr;
1064 target_ulong pd;
c2f07f81
PB
1065 ram_addr_t ram_addr;
1066 PhysPageDesc *p;
d720b93d 1067
c2f07f81
PB
1068 addr = cpu_get_phys_page_debug(env, pc);
1069 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1070 if (!p) {
1071 pd = IO_MEM_UNASSIGNED;
1072 } else {
1073 pd = p->phys_offset;
1074 }
1075 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1076 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1077}
c27004ec 1078#endif
d720b93d 1079
6658ffb8
PB
1080/* Add a watchpoint. */
1081int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1082{
1083 int i;
1084
1085 for (i = 0; i < env->nb_watchpoints; i++) {
1086 if (addr == env->watchpoint[i].vaddr)
1087 return 0;
1088 }
1089 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1090 return -1;
1091
1092 i = env->nb_watchpoints++;
1093 env->watchpoint[i].vaddr = addr;
1094 tlb_flush_page(env, addr);
1095 /* FIXME: This flush is needed because of the hack to make memory ops
1096 terminate the TB. It can be removed once the proper IO trap and
1097 re-execute bits are in. */
1098 tb_flush(env);
1099 return i;
1100}
1101
1102/* Remove a watchpoint. */
1103int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1104{
1105 int i;
1106
1107 for (i = 0; i < env->nb_watchpoints; i++) {
1108 if (addr == env->watchpoint[i].vaddr) {
1109 env->nb_watchpoints--;
1110 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1111 tlb_flush_page(env, addr);
1112 return 0;
1113 }
1114 }
1115 return -1;
1116}
1117
c33a346e
FB
1118/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1119 breakpoint is reached */
2e12669a 1120int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1121{
1fddef4b 1122#if defined(TARGET_HAS_ICE)
4c3a88a2 1123 int i;
3b46e624 1124
4c3a88a2
FB
1125 for(i = 0; i < env->nb_breakpoints; i++) {
1126 if (env->breakpoints[i] == pc)
1127 return 0;
1128 }
1129
1130 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1131 return -1;
1132 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1133
d720b93d 1134 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1135 return 0;
1136#else
1137 return -1;
1138#endif
1139}
1140
1141/* remove a breakpoint */
2e12669a 1142int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1143{
1fddef4b 1144#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1145 int i;
1146 for(i = 0; i < env->nb_breakpoints; i++) {
1147 if (env->breakpoints[i] == pc)
1148 goto found;
1149 }
1150 return -1;
1151 found:
4c3a88a2 1152 env->nb_breakpoints--;
1fddef4b
FB
1153 if (i < env->nb_breakpoints)
1154 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1155
1156 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1157 return 0;
1158#else
1159 return -1;
1160#endif
1161}
1162
c33a346e
FB
1163/* enable or disable single step mode. EXCP_DEBUG is returned by the
1164 CPU loop after each instruction */
1165void cpu_single_step(CPUState *env, int enabled)
1166{
1fddef4b 1167#if defined(TARGET_HAS_ICE)
c33a346e
FB
1168 if (env->singlestep_enabled != enabled) {
1169 env->singlestep_enabled = enabled;
1170 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1171 /* XXX: only flush what is necessary */
0124311e 1172 tb_flush(env);
c33a346e
FB
1173 }
1174#endif
1175}
1176
34865134
FB
1177/* enable or disable low levels log */
1178void cpu_set_log(int log_flags)
1179{
1180 loglevel = log_flags;
1181 if (loglevel && !logfile) {
11fcfab4 1182 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1183 if (!logfile) {
1184 perror(logfilename);
1185 _exit(1);
1186 }
9fa3e853
FB
1187#if !defined(CONFIG_SOFTMMU)
1188 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1189 {
1190 static uint8_t logfile_buf[4096];
1191 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1192 }
1193#else
34865134 1194 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1195#endif
e735b91c
PB
1196 log_append = 1;
1197 }
1198 if (!loglevel && logfile) {
1199 fclose(logfile);
1200 logfile = NULL;
34865134
FB
1201 }
1202}
1203
1204void cpu_set_log_filename(const char *filename)
1205{
1206 logfilename = strdup(filename);
e735b91c
PB
1207 if (logfile) {
1208 fclose(logfile);
1209 logfile = NULL;
1210 }
1211 cpu_set_log(loglevel);
34865134 1212}
c33a346e 1213
0124311e 1214/* mask must never be zero, except for A20 change call */
68a79315 1215void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1216{
1217 TranslationBlock *tb;
15a51156 1218 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1219
68a79315 1220 env->interrupt_request |= mask;
ea041c0e
FB
1221 /* if the cpu is currently executing code, we must unlink it and
1222 all the potentially executing TB */
1223 tb = env->current_tb;
ee8b7021
FB
1224 if (tb && !testandset(&interrupt_lock)) {
1225 env->current_tb = NULL;
ea041c0e 1226 tb_reset_jump_recursive(tb);
15a51156 1227 resetlock(&interrupt_lock);
ea041c0e
FB
1228 }
1229}
1230
b54ad049
FB
1231void cpu_reset_interrupt(CPUState *env, int mask)
1232{
1233 env->interrupt_request &= ~mask;
1234}
1235
f193c797 1236CPULogItem cpu_log_items[] = {
5fafdf24 1237 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1238 "show generated host assembly code for each compiled TB" },
1239 { CPU_LOG_TB_IN_ASM, "in_asm",
1240 "show target assembly code for each compiled TB" },
5fafdf24 1241 { CPU_LOG_TB_OP, "op",
57fec1fe 1242 "show micro ops for each compiled TB" },
f193c797 1243 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1244 "show micro ops "
1245#ifdef TARGET_I386
1246 "before eflags optimization and "
f193c797 1247#endif
e01a1157 1248 "after liveness analysis" },
f193c797
FB
1249 { CPU_LOG_INT, "int",
1250 "show interrupts/exceptions in short format" },
1251 { CPU_LOG_EXEC, "exec",
1252 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1253 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1254 "show CPU state before block translation" },
f193c797
FB
1255#ifdef TARGET_I386
1256 { CPU_LOG_PCALL, "pcall",
1257 "show protected mode far calls/returns/exceptions" },
1258#endif
8e3a9fd2 1259#ifdef DEBUG_IOPORT
fd872598
FB
1260 { CPU_LOG_IOPORT, "ioport",
1261 "show all i/o ports accesses" },
8e3a9fd2 1262#endif
f193c797
FB
1263 { 0, NULL, NULL },
1264};
1265
1266static int cmp1(const char *s1, int n, const char *s2)
1267{
1268 if (strlen(s2) != n)
1269 return 0;
1270 return memcmp(s1, s2, n) == 0;
1271}
3b46e624 1272
f193c797
FB
1273/* takes a comma separated list of log masks. Return 0 if error. */
1274int cpu_str_to_log_mask(const char *str)
1275{
1276 CPULogItem *item;
1277 int mask;
1278 const char *p, *p1;
1279
1280 p = str;
1281 mask = 0;
1282 for(;;) {
1283 p1 = strchr(p, ',');
1284 if (!p1)
1285 p1 = p + strlen(p);
8e3a9fd2
FB
1286 if(cmp1(p,p1-p,"all")) {
1287 for(item = cpu_log_items; item->mask != 0; item++) {
1288 mask |= item->mask;
1289 }
1290 } else {
f193c797
FB
1291 for(item = cpu_log_items; item->mask != 0; item++) {
1292 if (cmp1(p, p1 - p, item->name))
1293 goto found;
1294 }
1295 return 0;
8e3a9fd2 1296 }
f193c797
FB
1297 found:
1298 mask |= item->mask;
1299 if (*p1 != ',')
1300 break;
1301 p = p1 + 1;
1302 }
1303 return mask;
1304}
ea041c0e 1305
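/* Usage sketch (not part of the original file): the accepted names come from
   cpu_log_items[] above, e.g. cpu_str_to_log_mask("in_asm,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT, and 0 is returned for an unknown name. */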
7501267e
FB
1306void cpu_abort(CPUState *env, const char *fmt, ...)
1307{
1308 va_list ap;
493ae1f0 1309 va_list ap2;
7501267e
FB
1310
1311 va_start(ap, fmt);
493ae1f0 1312 va_copy(ap2, ap);
7501267e
FB
1313 fprintf(stderr, "qemu: fatal: ");
1314 vfprintf(stderr, fmt, ap);
1315 fprintf(stderr, "\n");
1316#ifdef TARGET_I386
0573fbfc
TS
1317 if(env->intercept & INTERCEPT_SVM_MASK) {
1318 /* most probably the virtual machine should not
1319 be shut down but rather caught by the VMM */
1320 vmexit(SVM_EXIT_SHUTDOWN, 0);
1321 }
7fe48483
FB
1322 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1323#else
1324 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1325#endif
924edcae 1326 if (logfile) {
f9373291 1327 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1328 vfprintf(logfile, fmt, ap2);
f9373291
JM
1329 fprintf(logfile, "\n");
1330#ifdef TARGET_I386
1331 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1332#else
1333 cpu_dump_state(env, logfile, fprintf, 0);
1334#endif
924edcae
AZ
1335 fflush(logfile);
1336 fclose(logfile);
1337 }
493ae1f0 1338 va_end(ap2);
f9373291 1339 va_end(ap);
7501267e
FB
1340 abort();
1341}
1342
c5be9f08
TS
1343CPUState *cpu_copy(CPUState *env)
1344{
01ba9816 1345 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1346 /* preserve chaining and index */
1347 CPUState *next_cpu = new_env->next_cpu;
1348 int cpu_index = new_env->cpu_index;
1349 memcpy(new_env, env, sizeof(CPUState));
1350 new_env->next_cpu = next_cpu;
1351 new_env->cpu_index = cpu_index;
1352 return new_env;
1353}
1354
0124311e
FB
1355#if !defined(CONFIG_USER_ONLY)
1356
ee8b7021
FB
1357/* NOTE: if flush_global is true, also flush global entries (not
1358 implemented yet) */
1359void tlb_flush(CPUState *env, int flush_global)
33417e70 1360{
33417e70 1361 int i;
0124311e 1362
9fa3e853
FB
1363#if defined(DEBUG_TLB)
1364 printf("tlb_flush:\n");
1365#endif
0124311e
FB
1366 /* must reset current TB so that interrupts cannot modify the
1367 links while we are modifying them */
1368 env->current_tb = NULL;
1369
33417e70 1370 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1371 env->tlb_table[0][i].addr_read = -1;
1372 env->tlb_table[0][i].addr_write = -1;
1373 env->tlb_table[0][i].addr_code = -1;
1374 env->tlb_table[1][i].addr_read = -1;
1375 env->tlb_table[1][i].addr_write = -1;
1376 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1377#if (NB_MMU_MODES >= 3)
1378 env->tlb_table[2][i].addr_read = -1;
1379 env->tlb_table[2][i].addr_write = -1;
1380 env->tlb_table[2][i].addr_code = -1;
1381#if (NB_MMU_MODES == 4)
1382 env->tlb_table[3][i].addr_read = -1;
1383 env->tlb_table[3][i].addr_write = -1;
1384 env->tlb_table[3][i].addr_code = -1;
1385#endif
1386#endif
33417e70 1387 }
9fa3e853 1388
8a40a180 1389 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1390
1391#if !defined(CONFIG_SOFTMMU)
1392 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1393#endif
1394#ifdef USE_KQEMU
1395 if (env->kqemu_enabled) {
1396 kqemu_flush(env, flush_global);
1397 }
9fa3e853 1398#endif
e3db7226 1399 tlb_flush_count++;
33417e70
FB
1400}
1401
274da6b2 1402static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1403{
5fafdf24 1404 if (addr == (tlb_entry->addr_read &
84b7b8e7 1405 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1406 addr == (tlb_entry->addr_write &
84b7b8e7 1407 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1408 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1409 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1410 tlb_entry->addr_read = -1;
1411 tlb_entry->addr_write = -1;
1412 tlb_entry->addr_code = -1;
1413 }
61382a50
FB
1414}
1415
2e12669a 1416void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1417{
8a40a180 1418 int i;
9fa3e853 1419 TranslationBlock *tb;
0124311e 1420
9fa3e853 1421#if defined(DEBUG_TLB)
108c49b8 1422 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1423#endif
0124311e
FB
1424 /* must reset current TB so that interrupts cannot modify the
1425 links while we are modifying them */
1426 env->current_tb = NULL;
61382a50
FB
1427
1428 addr &= TARGET_PAGE_MASK;
1429 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1430 tlb_flush_entry(&env->tlb_table[0][i], addr);
1431 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1432#if (NB_MMU_MODES >= 3)
1433 tlb_flush_entry(&env->tlb_table[2][i], addr);
1434#if (NB_MMU_MODES == 4)
1435 tlb_flush_entry(&env->tlb_table[3][i], addr);
1436#endif
1437#endif
0124311e 1438
b362e5e0
PB
1439 /* Discard jump cache entries for any tb which might potentially
1440 overlap the flushed page. */
1441 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1442 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1443
1444 i = tb_jmp_cache_hash_page(addr);
1445 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1446
0124311e 1447#if !defined(CONFIG_SOFTMMU)
9fa3e853 1448 if (addr < MMAP_AREA_END)
0124311e 1449 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1450#endif
0a962c02
FB
1451#ifdef USE_KQEMU
1452 if (env->kqemu_enabled) {
1453 kqemu_flush_page(env, addr);
1454 }
1455#endif
9fa3e853
FB
1456}
1457
9fa3e853
FB
1458/* update the TLBs so that writes to code in the virtual page 'addr'
1459 can be detected */
6a00d601 1460static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1461{
5fafdf24 1462 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1463 ram_addr + TARGET_PAGE_SIZE,
1464 CODE_DIRTY_FLAG);
9fa3e853
FB
1465}
1466
9fa3e853 1467/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1468 tested for self modifying code */
5fafdf24 1469static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1470 target_ulong vaddr)
9fa3e853 1471{
3a7d929e 1472 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1473}
1474
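/* Note (not part of the original file): dirty state is one byte per target
   page in phys_ram_dirty[].  tlb_protect_code() clears CODE_DIRTY_FLAG so
   guest writes to the page take the slow notdirty path and can invalidate
   TBs; tlb_unprotect_code_phys() sets it again once no translated code is
   left on the page, e.g.:
       code_dirty = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG;
*/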
5fafdf24 1475static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1476 unsigned long start, unsigned long length)
1477{
1478 unsigned long addr;
84b7b8e7
FB
1479 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1480 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1481 if ((addr - start) < length) {
84b7b8e7 1482 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1483 }
1484 }
1485}
1486
3a7d929e 1487void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1488 int dirty_flags)
1ccde1cb
FB
1489{
1490 CPUState *env;
4f2ac237 1491 unsigned long length, start1;
0a962c02
FB
1492 int i, mask, len;
1493 uint8_t *p;
1ccde1cb
FB
1494
1495 start &= TARGET_PAGE_MASK;
1496 end = TARGET_PAGE_ALIGN(end);
1497
1498 length = end - start;
1499 if (length == 0)
1500 return;
0a962c02 1501 len = length >> TARGET_PAGE_BITS;
3a7d929e 1502#ifdef USE_KQEMU
6a00d601
FB
1503 /* XXX: should not depend on cpu context */
1504 env = first_cpu;
3a7d929e 1505 if (env->kqemu_enabled) {
f23db169
FB
1506 ram_addr_t addr;
1507 addr = start;
1508 for(i = 0; i < len; i++) {
1509 kqemu_set_notdirty(env, addr);
1510 addr += TARGET_PAGE_SIZE;
1511 }
3a7d929e
FB
1512 }
1513#endif
f23db169
FB
1514 mask = ~dirty_flags;
1515 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1516 for(i = 0; i < len; i++)
1517 p[i] &= mask;
1518
1ccde1cb
FB
1519 /* we modify the TLB cache so that the dirty bit will be set again
1520 when accessing the range */
59817ccb 1521 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1522 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1523 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1524 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1525 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1526 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1527#if (NB_MMU_MODES >= 3)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1530#if (NB_MMU_MODES == 4)
1531 for(i = 0; i < CPU_TLB_SIZE; i++)
1532 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1533#endif
1534#endif
6a00d601 1535 }
59817ccb
FB
1536
1537#if !defined(CONFIG_SOFTMMU)
1538 /* XXX: this is expensive */
1539 {
1540 VirtPageDesc *p;
1541 int j;
1542 target_ulong addr;
1543
1544 for(i = 0; i < L1_SIZE; i++) {
1545 p = l1_virt_map[i];
1546 if (p) {
1547 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1548 for(j = 0; j < L2_SIZE; j++) {
1549 if (p->valid_tag == virt_valid_tag &&
1550 p->phys_addr >= start && p->phys_addr < end &&
1551 (p->prot & PROT_WRITE)) {
1552 if (addr < MMAP_AREA_END) {
5fafdf24 1553 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1554 p->prot & ~PROT_WRITE);
1555 }
1556 }
1557 addr += TARGET_PAGE_SIZE;
1558 p++;
1559 }
1560 }
1561 }
1562 }
1563#endif
1ccde1cb
FB
1564}
1565
3a7d929e
FB
1566static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1567{
1568 ram_addr_t ram_addr;
1569
84b7b8e7 1570 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1571 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1572 tlb_entry->addend - (unsigned long)phys_ram_base;
1573 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1574 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1575 }
1576 }
1577}
1578
1579/* update the TLB according to the current state of the dirty bits */
1580void cpu_tlb_update_dirty(CPUState *env)
1581{
1582 int i;
1583 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1584 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1585 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1586 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1587#if (NB_MMU_MODES >= 3)
1588 for(i = 0; i < CPU_TLB_SIZE; i++)
1589 tlb_update_dirty(&env->tlb_table[2][i]);
1590#if (NB_MMU_MODES == 4)
1591 for(i = 0; i < CPU_TLB_SIZE; i++)
1592 tlb_update_dirty(&env->tlb_table[3][i]);
1593#endif
1594#endif
3a7d929e
FB
1595}
1596
5fafdf24 1597static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1598 unsigned long start)
1ccde1cb
FB
1599{
1600 unsigned long addr;
84b7b8e7
FB
1601 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1602 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1603 if (addr == start) {
84b7b8e7 1604 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1605 }
1606 }
1607}
1608
1609/* update the TLB corresponding to virtual page vaddr and phys addr
1610 addr so that it is no longer dirty */
6a00d601
FB
1611static inline void tlb_set_dirty(CPUState *env,
1612 unsigned long addr, target_ulong vaddr)
1ccde1cb 1613{
1ccde1cb
FB
1614 int i;
1615
1ccde1cb
FB
1616 addr &= TARGET_PAGE_MASK;
1617 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1618 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1619 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1620#if (NB_MMU_MODES >= 3)
1621 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1622#if (NB_MMU_MODES == 4)
1623 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1624#endif
1625#endif
9fa3e853
FB
1626}
1627
59817ccb
FB
1628/* add a new TLB entry. At most one entry for a given virtual address
1629 is permitted. Return 0 if OK or 2 if the page could not be mapped
1630 (can only happen in non SOFTMMU mode for I/O pages or pages
1631 conflicting with the host address space). */
5fafdf24
TS
1632int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1633 target_phys_addr_t paddr, int prot,
6ebbf390 1634 int mmu_idx, int is_softmmu)
9fa3e853 1635{
92e873b9 1636 PhysPageDesc *p;
4f2ac237 1637 unsigned long pd;
9fa3e853 1638 unsigned int index;
4f2ac237 1639 target_ulong address;
108c49b8 1640 target_phys_addr_t addend;
9fa3e853 1641 int ret;
84b7b8e7 1642 CPUTLBEntry *te;
6658ffb8 1643 int i;
9fa3e853 1644
92e873b9 1645 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1646 if (!p) {
1647 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1648 } else {
1649 pd = p->phys_offset;
9fa3e853
FB
1650 }
1651#if defined(DEBUG_TLB)
6ebbf390
JM
1652 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1653 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1654#endif
1655
1656 ret = 0;
1657#if !defined(CONFIG_SOFTMMU)
5fafdf24 1658 if (is_softmmu)
9fa3e853
FB
1659#endif
1660 {
2a4188a3 1661 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1662 /* IO memory case */
1663 address = vaddr | pd;
1664 addend = paddr;
1665 } else {
1666 /* standard memory */
1667 address = vaddr;
1668 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1669 }
6658ffb8
PB
1670
1671 /* Make accesses to pages with watchpoints go via the
1672 watchpoint trap routines. */
1673 for (i = 0; i < env->nb_watchpoints; i++) {
1674 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1675 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1676 env->watchpoint[i].addend = 0;
6658ffb8
PB
1677 address = vaddr | io_mem_watch;
1678 } else {
d79acba4
AZ
1679 env->watchpoint[i].addend = pd - paddr +
1680 (unsigned long) phys_ram_base;
6658ffb8
PB
1681 /* TODO: Figure out how to make read watchpoints coexist
1682 with code. */
1683 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1684 }
1685 }
1686 }
d79acba4 1687
90f18422 1688 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1689 addend -= vaddr;
6ebbf390 1690 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1691 te->addend = addend;
67b915a5 1692 if (prot & PAGE_READ) {
84b7b8e7
FB
1693 te->addr_read = address;
1694 } else {
1695 te->addr_read = -1;
1696 }
1697 if (prot & PAGE_EXEC) {
1698 te->addr_code = address;
9fa3e853 1699 } else {
84b7b8e7 1700 te->addr_code = -1;
9fa3e853 1701 }
67b915a5 1702 if (prot & PAGE_WRITE) {
5fafdf24 1703 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1704 (pd & IO_MEM_ROMD)) {
1705 /* write access calls the I/O callback */
5fafdf24 1706 te->addr_write = vaddr |
856074ec 1707 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1708 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1709 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1710 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1711 } else {
84b7b8e7 1712 te->addr_write = address;
9fa3e853
FB
1713 }
1714 } else {
84b7b8e7 1715 te->addr_write = -1;
9fa3e853
FB
1716 }
1717 }
1718#if !defined(CONFIG_SOFTMMU)
1719 else {
1720 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1721 /* IO access: no mapping is done as it will be handled by the
1722 soft MMU */
1723 if (!(env->hflags & HF_SOFTMMU_MASK))
1724 ret = 2;
1725 } else {
1726 void *map_addr;
59817ccb
FB
1727
1728 if (vaddr >= MMAP_AREA_END) {
1729 ret = 2;
1730 } else {
1731 if (prot & PROT_WRITE) {
5fafdf24 1732 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1733#if defined(TARGET_HAS_SMC) || 1
59817ccb 1734 first_tb ||
d720b93d 1735#endif
5fafdf24 1736 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1737 !cpu_physical_memory_is_dirty(pd))) {
1738 /* ROM: we do as if code was inside */
1739 /* if code is present, we only map as read only and save the
1740 original mapping */
1741 VirtPageDesc *vp;
3b46e624 1742
90f18422 1743 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1744 vp->phys_addr = pd;
1745 vp->prot = prot;
1746 vp->valid_tag = virt_valid_tag;
1747 prot &= ~PAGE_WRITE;
1748 }
1749 }
5fafdf24 1750 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1751 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1752 if (map_addr == MAP_FAILED) {
1753 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1754 paddr, vaddr);
9fa3e853 1755 }
9fa3e853
FB
1756 }
1757 }
1758 }
1759#endif
1760 return ret;
1761}
1762
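/* Illustrative sketch (not part of the original file): for a RAM page the
   addend stored above lets the softmmu fast path turn a guest virtual address
   directly into a host pointer, roughly:
       host = (uint8_t *)(addr + env->tlb_table[mmu_idx][index].addend);
   whereas I/O pages keep the io_index in the low bits of the address fields,
   so accesses are routed through io_mem_read/io_mem_write. */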
1763/* called from signal handler: invalidate the code and unprotect the
1764 page. Return TRUE if the fault was successfully handled. */
53a5960a 1765int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1766{
1767#if !defined(CONFIG_SOFTMMU)
1768 VirtPageDesc *vp;
1769
1770#if defined(DEBUG_TLB)
1771 printf("page_unprotect: addr=0x%08x\n", addr);
1772#endif
1773 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1774
1775 /* if it is not mapped, no need to worry here */
1776 if (addr >= MMAP_AREA_END)
1777 return 0;
9fa3e853
FB
1778 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1779 if (!vp)
1780 return 0;
1781 /* NOTE: in this case, validate_tag is _not_ tested as it
1782 validates only the code TLB */
1783 if (vp->valid_tag != virt_valid_tag)
1784 return 0;
1785 if (!(vp->prot & PAGE_WRITE))
1786 return 0;
1787#if defined(DEBUG_TLB)
5fafdf24 1788 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1789 addr, vp->phys_addr, vp->prot);
1790#endif
59817ccb
FB
1791 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1792 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1793 (unsigned long)addr, vp->prot);
d720b93d 1794 /* set the dirty bit */
0a962c02 1795 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1796 /* flush the code inside */
1797 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1798 return 1;
1799#else
1800 return 0;
1801#endif
33417e70
FB
1802}
1803
0124311e
FB
1804#else
1805
ee8b7021 1806void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1807{
1808}
1809
2e12669a 1810void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1811{
1812}
1813
5fafdf24
TS
1814int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1815 target_phys_addr_t paddr, int prot,
6ebbf390 1816 int mmu_idx, int is_softmmu)
9fa3e853
FB
1817{
1818 return 0;
1819}
0124311e 1820
9fa3e853
FB
1821/* dump memory mappings */
1822void page_dump(FILE *f)
33417e70 1823{
9fa3e853
FB
1824 unsigned long start, end;
1825 int i, j, prot, prot1;
1826 PageDesc *p;
33417e70 1827
9fa3e853
FB
1828 fprintf(f, "%-8s %-8s %-8s %s\n",
1829 "start", "end", "size", "prot");
1830 start = -1;
1831 end = -1;
1832 prot = 0;
1833 for(i = 0; i <= L1_SIZE; i++) {
1834 if (i < L1_SIZE)
1835 p = l1_map[i];
1836 else
1837 p = NULL;
1838 for(j = 0;j < L2_SIZE; j++) {
1839 if (!p)
1840 prot1 = 0;
1841 else
1842 prot1 = p[j].flags;
1843 if (prot1 != prot) {
1844 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1845 if (start != -1) {
1846 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1847 start, end, end - start,
9fa3e853
FB
1848 prot & PAGE_READ ? 'r' : '-',
1849 prot & PAGE_WRITE ? 'w' : '-',
1850 prot & PAGE_EXEC ? 'x' : '-');
1851 }
1852 if (prot1 != 0)
1853 start = end;
1854 else
1855 start = -1;
1856 prot = prot1;
1857 }
1858 if (!p)
1859 break;
1860 }
33417e70 1861 }
33417e70
FB
1862}
1863
53a5960a 1864int page_get_flags(target_ulong address)
33417e70 1865{
9fa3e853
FB
1866 PageDesc *p;
1867
1868 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1869 if (!p)
9fa3e853
FB
1870 return 0;
1871 return p->flags;
1872}
1873
1874/* modify the flags of a page and invalidate the code if
1875 necessary. The flag PAGE_WRITE_ORG is set automatically
1876 depending on PAGE_WRITE */
53a5960a 1877void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1878{
1879 PageDesc *p;
53a5960a 1880 target_ulong addr;
9fa3e853
FB
1881
1882 start = start & TARGET_PAGE_MASK;
1883 end = TARGET_PAGE_ALIGN(end);
1884 if (flags & PAGE_WRITE)
1885 flags |= PAGE_WRITE_ORG;
1886 spin_lock(&tb_lock);
1887 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1888 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1889 /* if the write protection is set, then we invalidate the code
1890 inside */
5fafdf24 1891 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1892 (flags & PAGE_WRITE) &&
1893 p->first_tb) {
d720b93d 1894 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1895 }
1896 p->flags = flags;
1897 }
1898 spin_unlock(&tb_lock);
33417e70
FB
1899}
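
/* Illustrative sketch, not part of exec.c: the expected caller of
   page_set_flags() is the user-mode emulation layer (e.g. the mmap path in
   linux-user).  The helper name and the flag combination are made up for
   the example. */
static void example_mark_guest_mapping(target_ulong guest_addr, target_ulong len)
{
    /* PAGE_VALID marks the range as mapped; because PAGE_WRITE is set,
       PAGE_WRITE_ORG is recorded as well, so pages that later get
       write-protected to guard translated code can be recovered by
       page_unprotect(). */
    page_set_flags(guest_addr, guest_addr + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}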
1900
3d97b40b
TS
1901int page_check_range(target_ulong start, target_ulong len, int flags)
1902{
1903 PageDesc *p;
1904 target_ulong end;
1905 target_ulong addr;
1906
1907 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1908 start = start & TARGET_PAGE_MASK;
1909
1910 if( end < start )
1911 /* we've wrapped around */
1912 return -1;
1913 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1914 p = page_find(addr >> TARGET_PAGE_BITS);
1915 if( !p )
1916 return -1;
1917 if( !(p->flags & PAGE_VALID) )
1918 return -1;
1919
dae3270c 1920 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1921 return -1;
dae3270c
FB
1922 if (flags & PAGE_WRITE) {
1923 if (!(p->flags & PAGE_WRITE_ORG))
1924 return -1;
1925 /* unprotect the page if it was put read-only because it
1926 contains translated code */
1927 if (!(p->flags & PAGE_WRITE)) {
1928 if (!page_unprotect(addr, 0, NULL))
1929 return -1;
1930 }
1931 return 0;
1932 }
3d97b40b
TS
1933 }
1934 return 0;
1935}
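
/* Illustrative sketch, not part of exec.c: a hypothetical syscall helper
   that validates a guest buffer with page_check_range() before copying it
   out.  Error conventions are simplified. */
static int example_copy_from_guest(void *dst, target_ulong guest_src,
                                   target_ulong len)
{
    if (page_check_range(guest_src, len, PAGE_READ) < 0)
        return -1;                       /* unmapped or unreadable range */
    memcpy(dst, g2h(guest_src), len);    /* g2h(): guest address -> host pointer */
    return 0;
}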
1936
9fa3e853
FB
1937/* called from signal handler: invalidate the code and unprotect the
1938 page. Return TRUE if the fault was successfully handled. */
53a5960a 1939int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1940{
1941 unsigned int page_index, prot, pindex;
1942 PageDesc *p, *p1;
53a5960a 1943 target_ulong host_start, host_end, addr;
9fa3e853 1944
83fb7adf 1945 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1946 page_index = host_start >> TARGET_PAGE_BITS;
1947 p1 = page_find(page_index);
1948 if (!p1)
1949 return 0;
83fb7adf 1950 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1951 p = p1;
1952 prot = 0;
1953 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1954 prot |= p->flags;
1955 p++;
1956 }
1957 /* if the page was really writable, then we change its
1958 protection back to writable */
1959 if (prot & PAGE_WRITE_ORG) {
1960 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1961 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1962 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1963 (prot & PAGE_BITS) | PAGE_WRITE);
1964 p1[pindex].flags |= PAGE_WRITE;
1965 /* and since the content will be modified, we must invalidate
1966 the corresponding translated code. */
d720b93d 1967 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1968#ifdef DEBUG_TB_CHECK
1969 tb_invalidate_check(address);
1970#endif
1971 return 1;
1972 }
1973 }
1974 return 0;
1975}
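
/* Illustrative sketch, not part of exec.c: page_unprotect() is meant to be
   called from the host SIGSEGV path of user-mode emulation.  The helper
   name below is made up; the real logic lives in the per-host
   cpu_signal_handler()/handle_cpu_signal() code. */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    /* h2g() maps the faulting host address back to a guest address; if the
       page was only write-protected because it holds translated code,
       page_unprotect() re-enables PAGE_WRITE and the access can simply be
       restarted. */
    return page_unprotect(h2g(host_addr), pc, puc);
}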
1976
6a00d601
FB
1977static inline void tlb_set_dirty(CPUState *env,
1978 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1979{
1980}
9fa3e853
FB
1981#endif /* defined(CONFIG_USER_ONLY) */
1982
db7b5426
BS
1983static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1984 int memory);
1985static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1986 int orig_memory);
1987#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1988 need_subpage) \
1989 do { \
1990 if (addr > start_addr) \
1991 start_addr2 = 0; \
1992 else { \
1993 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1994 if (start_addr2 > 0) \
1995 need_subpage = 1; \
1996 } \
1997 \
49e9fba2 1998 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
1999 end_addr2 = TARGET_PAGE_SIZE - 1; \
2000 else { \
2001 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2002 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2003 need_subpage = 1; \
2004 } \
2005 } while (0)
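
/* Worked example, illustrative only and assuming 4 KiB target pages:
   registering orig_size = 0x100 bytes at start_addr = 0x10000080 makes
   CHECK_SUBPAGE compute, on the first loop iteration below
   (addr == start_addr):
       start_addr2  = 0x080   offset of the region inside its page
       end_addr2    = 0x17f   inclusive end offset inside the page
       need_subpage = 1       the region does not cover the whole page
   so cpu_register_physical_memory() routes that page through a subpage_t
   instead of mapping the whole page to one memory region. */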
2006
33417e70
FB
2007/* register physical memory. 'size' must be a multiple of the target
2008 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2009 I/O memory page */
5fafdf24 2010void cpu_register_physical_memory(target_phys_addr_t start_addr,
2e12669a
FB
2011 unsigned long size,
2012 unsigned long phys_offset)
33417e70 2013{
108c49b8 2014 target_phys_addr_t addr, end_addr;
92e873b9 2015 PhysPageDesc *p;
9d42037b 2016 CPUState *env;
db7b5426
BS
2017 unsigned long orig_size = size;
2018 void *subpage;
33417e70 2019
5fd386f6 2020 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2021 end_addr = start_addr + (target_phys_addr_t)size;
2022 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2023 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2024 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2025 unsigned long orig_memory = p->phys_offset;
2026 target_phys_addr_t start_addr2, end_addr2;
2027 int need_subpage = 0;
2028
2029 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2030 need_subpage);
4254fab8 2031 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2032 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2033 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2034 &p->phys_offset, orig_memory);
2035 } else {
2036 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2037 >> IO_MEM_SHIFT];
2038 }
2039 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2040 } else {
2041 p->phys_offset = phys_offset;
2042 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2043 (phys_offset & IO_MEM_ROMD))
2044 phys_offset += TARGET_PAGE_SIZE;
2045 }
2046 } else {
2047 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2048 p->phys_offset = phys_offset;
2049 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2050 (phys_offset & IO_MEM_ROMD))
2051 phys_offset += TARGET_PAGE_SIZE;
2052 else {
2053 target_phys_addr_t start_addr2, end_addr2;
2054 int need_subpage = 0;
2055
2056 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2057 end_addr2, need_subpage);
2058
4254fab8 2059 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2060 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2061 &p->phys_offset, IO_MEM_UNASSIGNED);
2062 subpage_register(subpage, start_addr2, end_addr2,
2063 phys_offset);
2064 }
2065 }
2066 }
33417e70 2067 }
3b46e624 2068
9d42037b
FB
2069 /* since each CPU stores ram addresses in its TLB cache, we must
2070 reset the modified entries */
2071 /* XXX: slow ! */
2072 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2073 tlb_flush(env, 1);
2074 }
33417e70
FB
2075}
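
/* Illustrative sketch, not part of exec.c: typical board-init usage of
   qemu_ram_alloc() (defined below) together with
   cpu_register_physical_memory().  The size and base address are made up;
   real boards live under hw/. */
static void example_register_board_ram(void)
{
    ram_addr_t ram_offset;

    /* carve 8 MB out of the contiguous phys_ram_base area ... */
    ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    /* ... and expose it as guest-physical RAM starting at address 0 */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}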
2076
ba863458
FB
2077/* XXX: temporary until new memory mapping API */
2078uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2079{
2080 PhysPageDesc *p;
2081
2082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2083 if (!p)
2084 return IO_MEM_UNASSIGNED;
2085 return p->phys_offset;
2086}
2087
e9a1ab19
FB
2088/* XXX: better than nothing */
2089ram_addr_t qemu_ram_alloc(unsigned int size)
2090{
2091 ram_addr_t addr;
2092 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
5fafdf24 2093 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
e9a1ab19
FB
2094 size, phys_ram_size);
2095 abort();
2096 }
2097 addr = phys_ram_alloc_offset;
2098 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2099 return addr;
2100}
2101
2102void qemu_ram_free(ram_addr_t addr)
2103{
2104}
2105
a4193c8a 2106static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2107{
67d3b957 2108#ifdef DEBUG_UNASSIGNED
ab3d1727 2109 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2110#endif
2111#ifdef TARGET_SPARC
6c36d3fa 2112 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2113#elif TARGET_CRIS
2114 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2115#endif
33417e70
FB
2116 return 0;
2117}
2118
a4193c8a 2119static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2120{
67d3b957 2121#ifdef DEBUG_UNASSIGNED
ab3d1727 2122 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2123#endif
b4f0a316 2124#ifdef TARGET_SPARC
6c36d3fa 2125 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2126#elif TARGET_CRIS
2127 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2128#endif
33417e70
FB
2129}
2130
2131static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2132 unassigned_mem_readb,
2133 unassigned_mem_readb,
2134 unassigned_mem_readb,
2135};
2136
2137static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2138 unassigned_mem_writeb,
2139 unassigned_mem_writeb,
2140 unassigned_mem_writeb,
2141};
2142
3a7d929e 2143static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2144{
3a7d929e
FB
2145 unsigned long ram_addr;
2146 int dirty_flags;
2147 ram_addr = addr - (unsigned long)phys_ram_base;
2148 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2149 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2150#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2151 tb_invalidate_phys_page_fast(ram_addr, 1);
2152 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2153#endif
3a7d929e 2154 }
c27004ec 2155 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2156#ifdef USE_KQEMU
2157 if (cpu_single_env->kqemu_enabled &&
2158 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2159 kqemu_modify_page(cpu_single_env, ram_addr);
2160#endif
f23db169
FB
2161 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2162 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2163 /* we remove the notdirty callback only if the code has been
2164 flushed */
2165 if (dirty_flags == 0xff)
6a00d601 2166 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2167}
2168
3a7d929e 2169static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2170{
3a7d929e
FB
2171 unsigned long ram_addr;
2172 int dirty_flags;
2173 ram_addr = addr - (unsigned long)phys_ram_base;
2174 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2175 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2176#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2177 tb_invalidate_phys_page_fast(ram_addr, 2);
2178 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2179#endif
3a7d929e 2180 }
c27004ec 2181 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2182#ifdef USE_KQEMU
2183 if (cpu_single_env->kqemu_enabled &&
2184 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2185 kqemu_modify_page(cpu_single_env, ram_addr);
2186#endif
f23db169
FB
2187 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2188 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2189 /* we remove the notdirty callback only if the code has been
2190 flushed */
2191 if (dirty_flags == 0xff)
6a00d601 2192 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2193}
2194
3a7d929e 2195static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2196{
3a7d929e
FB
2197 unsigned long ram_addr;
2198 int dirty_flags;
2199 ram_addr = addr - (unsigned long)phys_ram_base;
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2202#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2203 tb_invalidate_phys_page_fast(ram_addr, 4);
2204 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2205#endif
3a7d929e 2206 }
c27004ec 2207 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2208#ifdef USE_KQEMU
2209 if (cpu_single_env->kqemu_enabled &&
2210 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2211 kqemu_modify_page(cpu_single_env, ram_addr);
2212#endif
f23db169
FB
2213 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2214 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2215 /* we remove the notdirty callback only if the code has been
2216 flushed */
2217 if (dirty_flags == 0xff)
6a00d601 2218 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2219}
2220
3a7d929e 2221static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2222 NULL, /* never used */
2223 NULL, /* never used */
2224 NULL, /* never used */
2225};
2226
1ccde1cb
FB
2227static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2228 notdirty_mem_writeb,
2229 notdirty_mem_writew,
2230 notdirty_mem_writel,
2231};
2232
6658ffb8
PB
2233#if defined(CONFIG_SOFTMMU)
2234/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2235 so these check for a hit then pass through to the normal out-of-line
2236 phys routines. */
2237static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2238{
2239 return ldub_phys(addr);
2240}
2241
2242static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2243{
2244 return lduw_phys(addr);
2245}
2246
2247static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2248{
2249 return ldl_phys(addr);
2250}
2251
2252/* Generate a debug exception if a watchpoint has been hit.
2253 Returns the real physical address of the access. addr will be a host
d79acba4 2254 address in case of a RAM location. */
6658ffb8
PB
2255static target_ulong check_watchpoint(target_phys_addr_t addr)
2256{
2257 CPUState *env = cpu_single_env;
2258 target_ulong watch;
2259 target_ulong retaddr;
2260 int i;
2261
2262 retaddr = addr;
2263 for (i = 0; i < env->nb_watchpoints; i++) {
2264 watch = env->watchpoint[i].vaddr;
2265 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2266 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2267 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2268 cpu_single_env->watchpoint_hit = i + 1;
2269 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2270 break;
2271 }
2272 }
2273 }
2274 return retaddr;
2275}
2276
2277static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2278 uint32_t val)
2279{
2280 addr = check_watchpoint(addr);
2281 stb_phys(addr, val);
2282}
2283
2284static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2285 uint32_t val)
2286{
2287 addr = check_watchpoint(addr);
2288 stw_phys(addr, val);
2289}
2290
2291static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2292 uint32_t val)
2293{
2294 addr = check_watchpoint(addr);
2295 stl_phys(addr, val);
2296}
2297
2298static CPUReadMemoryFunc *watch_mem_read[3] = {
2299 watch_mem_readb,
2300 watch_mem_readw,
2301 watch_mem_readl,
2302};
2303
2304static CPUWriteMemoryFunc *watch_mem_write[3] = {
2305 watch_mem_writeb,
2306 watch_mem_writew,
2307 watch_mem_writel,
2308};
2309#endif
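
/* Illustrative sketch, not part of exec.c: arming one of the watchpoints
   consumed by tlb_set_page_exec() and check_watchpoint() above.  This
   hand-rolled helper only touches fields that appear in this file; the
   real helper in QEMU may differ, and the bounds check against the fixed
   watchpoint array is omitted. */
static int example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    int i = env->nb_watchpoints++;

    env->watchpoint[i].vaddr = vaddr;
    /* drop any cached mapping so the next access to this page refills the
       TLB entry and is redirected through io_mem_watch */
    tlb_flush_page(env, vaddr);
    return i;
}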
2310
db7b5426
BS
2311static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2312 unsigned int len)
2313{
db7b5426
BS
2314 uint32_t ret;
2315 unsigned int idx;
2316
2317 idx = SUBPAGE_IDX(addr - mmio->base);
2318#if defined(DEBUG_SUBPAGE)
2319 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2320 mmio, len, addr, idx);
2321#endif
3ee89922 2322 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2323
2324 return ret;
2325}
2326
2327static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2328 uint32_t value, unsigned int len)
2329{
db7b5426
BS
2330 unsigned int idx;
2331
2332 idx = SUBPAGE_IDX(addr - mmio->base);
2333#if defined(DEBUG_SUBPAGE)
2334 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2335 mmio, len, addr, idx, value);
2336#endif
3ee89922 2337 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2338}
2339
2340static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2341{
2342#if defined(DEBUG_SUBPAGE)
2343 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2344#endif
2345
2346 return subpage_readlen(opaque, addr, 0);
2347}
2348
2349static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2350 uint32_t value)
2351{
2352#if defined(DEBUG_SUBPAGE)
2353 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2354#endif
2355 subpage_writelen(opaque, addr, value, 0);
2356}
2357
2358static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2359{
2360#if defined(DEBUG_SUBPAGE)
2361 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2362#endif
2363
2364 return subpage_readlen(opaque, addr, 1);
2365}
2366
2367static void subpage_writew (void *opaque, target_phys_addr_t addr,
2368 uint32_t value)
2369{
2370#if defined(DEBUG_SUBPAGE)
2371 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2372#endif
2373 subpage_writelen(opaque, addr, value, 1);
2374}
2375
2376static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2377{
2378#if defined(DEBUG_SUBPAGE)
2379 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2380#endif
2381
2382 return subpage_readlen(opaque, addr, 2);
2383}
2384
2385static void subpage_writel (void *opaque,
2386 target_phys_addr_t addr, uint32_t value)
2387{
2388#if defined(DEBUG_SUBPAGE)
2389 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2390#endif
2391 subpage_writelen(opaque, addr, value, 2);
2392}
2393
2394static CPUReadMemoryFunc *subpage_read[] = {
2395 &subpage_readb,
2396 &subpage_readw,
2397 &subpage_readl,
2398};
2399
2400static CPUWriteMemoryFunc *subpage_write[] = {
2401 &subpage_writeb,
2402 &subpage_writew,
2403 &subpage_writel,
2404};
2405
2406static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2407 int memory)
2408{
2409 int idx, eidx;
4254fab8 2410 unsigned int i;
db7b5426
BS
2411
2412 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2413 return -1;
2414 idx = SUBPAGE_IDX(start);
2415 eidx = SUBPAGE_IDX(end);
2416#if defined(DEBUG_SUBPAGE)
2417 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2418 mmio, start, end, idx, eidx, memory);
2419#endif
2420 memory >>= IO_MEM_SHIFT;
2421 for (; idx <= eidx; idx++) {
4254fab8 2422 for (i = 0; i < 4; i++) {
3ee89922
BS
2423 if (io_mem_read[memory][i]) {
2424 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2425 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2426 }
2427 if (io_mem_write[memory][i]) {
2428 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2429 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2430 }
4254fab8 2431 }
db7b5426
BS
2432 }
2433
2434 return 0;
2435}
2436
2437static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2438 int orig_memory)
2439{
2440 subpage_t *mmio;
2441 int subpage_memory;
2442
2443 mmio = qemu_mallocz(sizeof(subpage_t));
2444 if (mmio != NULL) {
2445 mmio->base = base;
2446 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2447#if defined(DEBUG_SUBPAGE)
2448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2450#endif
2451 *phys = subpage_memory | IO_MEM_SUBPAGE;
2452 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2453 }
2454
2455 return mmio;
2456}
2457
33417e70
FB
2458static void io_mem_init(void)
2459{
3a7d929e 2460 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2461 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2462 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2463 io_mem_nb = 5;
2464
6658ffb8
PB
2465#if defined(CONFIG_SOFTMMU)
2466 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2467 watch_mem_write, NULL);
2468#endif
1ccde1cb 2469 /* alloc dirty bits array */
0a962c02 2470 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2471 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2472}
2473
2474/* mem_read and mem_write are arrays of functions containing the
2475 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2476 2). Functions can be omitted with a NULL function pointer. The
2477 registered functions may be modified dynamically later.
2478 If io_index is non-zero, the corresponding I/O zone is
4254fab8
BS
2479 modified. If it is zero, a new I/O zone is allocated. The return
2480 value can be used with cpu_register_physical_memory(). (-1) is
2481 returned on error. */
33417e70
FB
2482int cpu_register_io_memory(int io_index,
2483 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2484 CPUWriteMemoryFunc **mem_write,
2485 void *opaque)
33417e70 2486{
4254fab8 2487 int i, subwidth = 0;
33417e70
FB
2488
2489 if (io_index <= 0) {
b5ff1b31 2490 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2491 return -1;
2492 io_index = io_mem_nb++;
2493 } else {
2494 if (io_index >= IO_MEM_NB_ENTRIES)
2495 return -1;
2496 }
b5ff1b31 2497
33417e70 2498 for(i = 0;i < 3; i++) {
4254fab8
BS
2499 if (!mem_read[i] || !mem_write[i])
2500 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2501 io_mem_read[io_index][i] = mem_read[i];
2502 io_mem_write[io_index][i] = mem_write[i];
2503 }
a4193c8a 2504 io_mem_opaque[io_index] = opaque;
4254fab8 2505 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2506}
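
/* Illustrative sketch, not part of exec.c: registering a trivial MMIO
   device through the callback-array interface documented above, then
   mapping one page of it into the guest physical address space.  The
   device and its constant value are made up. */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;                      /* same handler reused for b/w/l */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* a real device would decode addr and latch val */
}

static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};
static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_map_device(target_phys_addr_t base)
{
    int iomemtype = cpu_register_io_memory(0, example_dev_readfn,
                                           example_dev_writefn, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}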
61382a50 2507
8926b517
FB
2508CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2509{
2510 return io_mem_write[io_index >> IO_MEM_SHIFT];
2511}
2512
2513CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2514{
2515 return io_mem_read[io_index >> IO_MEM_SHIFT];
2516}
2517
13eb76e0
FB
2518/* physical memory access (slow version, mainly for debug) */
2519#if defined(CONFIG_USER_ONLY)
5fafdf24 2520void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2521 int len, int is_write)
2522{
2523 int l, flags;
2524 target_ulong page;
53a5960a 2525 void * p;
13eb76e0
FB
2526
2527 while (len > 0) {
2528 page = addr & TARGET_PAGE_MASK;
2529 l = (page + TARGET_PAGE_SIZE) - addr;
2530 if (l > len)
2531 l = len;
2532 flags = page_get_flags(page);
2533 if (!(flags & PAGE_VALID))
2534 return;
2535 if (is_write) {
2536 if (!(flags & PAGE_WRITE))
2537 return;
579a97f7
FB
2538 /* XXX: this code should not depend on lock_user */
2539 if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
2540 /* FIXME - should this return an error rather than just fail? */
2541 return;
53a5960a
PB
2542 memcpy(p, buf, len);
2543 unlock_user(p, addr, len);
13eb76e0
FB
2544 } else {
2545 if (!(flags & PAGE_READ))
2546 return;
579a97f7
FB
2547 /* XXX: this code should not depend on lock_user */
2548 if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
2549 /* FIXME - should this return an error rather than just fail? */
2550 return;
53a5960a
PB
2551 memcpy(buf, p, len);
2552 unlock_user(p, addr, 0);
13eb76e0
FB
2553 }
2554 len -= l;
2555 buf += l;
2556 addr += l;
2557 }
2558}
8df1cd07 2559
13eb76e0 2560#else
5fafdf24 2561void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2562 int len, int is_write)
2563{
2564 int l, io_index;
2565 uint8_t *ptr;
2566 uint32_t val;
2e12669a
FB
2567 target_phys_addr_t page;
2568 unsigned long pd;
92e873b9 2569 PhysPageDesc *p;
3b46e624 2570
13eb76e0
FB
2571 while (len > 0) {
2572 page = addr & TARGET_PAGE_MASK;
2573 l = (page + TARGET_PAGE_SIZE) - addr;
2574 if (l > len)
2575 l = len;
92e873b9 2576 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2577 if (!p) {
2578 pd = IO_MEM_UNASSIGNED;
2579 } else {
2580 pd = p->phys_offset;
2581 }
3b46e624 2582
13eb76e0 2583 if (is_write) {
3a7d929e 2584 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2585 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2586 /* XXX: could force cpu_single_env to NULL to avoid
2587 potential bugs */
13eb76e0 2588 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2589 /* 32 bit write access */
c27004ec 2590 val = ldl_p(buf);
a4193c8a 2591 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2592 l = 4;
2593 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2594 /* 16 bit write access */
c27004ec 2595 val = lduw_p(buf);
a4193c8a 2596 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2597 l = 2;
2598 } else {
1c213d19 2599 /* 8 bit write access */
c27004ec 2600 val = ldub_p(buf);
a4193c8a 2601 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2602 l = 1;
2603 }
2604 } else {
b448f2f3
FB
2605 unsigned long addr1;
2606 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2607 /* RAM case */
b448f2f3 2608 ptr = phys_ram_base + addr1;
13eb76e0 2609 memcpy(ptr, buf, l);
3a7d929e
FB
2610 if (!cpu_physical_memory_is_dirty(addr1)) {
2611 /* invalidate code */
2612 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2613 /* set dirty bit */
5fafdf24 2614 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2615 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2616 }
13eb76e0
FB
2617 }
2618 } else {
5fafdf24 2619 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2620 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2621 /* I/O case */
2622 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2623 if (l >= 4 && ((addr & 3) == 0)) {
2624 /* 32 bit read access */
a4193c8a 2625 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2626 stl_p(buf, val);
13eb76e0
FB
2627 l = 4;
2628 } else if (l >= 2 && ((addr & 1) == 0)) {
2629 /* 16 bit read access */
a4193c8a 2630 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2631 stw_p(buf, val);
13eb76e0
FB
2632 l = 2;
2633 } else {
1c213d19 2634 /* 8 bit read access */
a4193c8a 2635 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2636 stb_p(buf, val);
13eb76e0
FB
2637 l = 1;
2638 }
2639 } else {
2640 /* RAM case */
5fafdf24 2641 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2642 (addr & ~TARGET_PAGE_MASK);
2643 memcpy(buf, ptr, l);
2644 }
2645 }
2646 len -= l;
2647 buf += l;
2648 addr += l;
2649 }
2650}
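
/* Illustrative sketch, not part of exec.c: device models and loaders use
   the slow accessors above to touch guest-physical memory without going
   through any CPU TLB.  The address and fill pattern are made up. */
static void example_phys_access(target_phys_addr_t base)
{
    uint8_t buf[16];

    memset(buf, 0xab, sizeof(buf));
    /* cpu_physical_memory_write()/read() are thin wrappers around
       cpu_physical_memory_rw() with is_write = 1 / 0 */
    cpu_physical_memory_write(base, buf, sizeof(buf));
    cpu_physical_memory_read(base, buf, sizeof(buf));
}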
8df1cd07 2651
d0ecd2aa 2652/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2653void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2654 const uint8_t *buf, int len)
2655{
2656 int l;
2657 uint8_t *ptr;
2658 target_phys_addr_t page;
2659 unsigned long pd;
2660 PhysPageDesc *p;
3b46e624 2661
d0ecd2aa
FB
2662 while (len > 0) {
2663 page = addr & TARGET_PAGE_MASK;
2664 l = (page + TARGET_PAGE_SIZE) - addr;
2665 if (l > len)
2666 l = len;
2667 p = phys_page_find(page >> TARGET_PAGE_BITS);
2668 if (!p) {
2669 pd = IO_MEM_UNASSIGNED;
2670 } else {
2671 pd = p->phys_offset;
2672 }
3b46e624 2673
d0ecd2aa 2674 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2675 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2676 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2677 /* do nothing */
2678 } else {
2679 unsigned long addr1;
2680 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2681 /* ROM/RAM case */
2682 ptr = phys_ram_base + addr1;
2683 memcpy(ptr, buf, l);
2684 }
2685 len -= l;
2686 buf += l;
2687 addr += l;
2688 }
2689}
2690
2691
8df1cd07
FB
2692/* warning: addr must be aligned */
2693uint32_t ldl_phys(target_phys_addr_t addr)
2694{
2695 int io_index;
2696 uint8_t *ptr;
2697 uint32_t val;
2698 unsigned long pd;
2699 PhysPageDesc *p;
2700
2701 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2702 if (!p) {
2703 pd = IO_MEM_UNASSIGNED;
2704 } else {
2705 pd = p->phys_offset;
2706 }
3b46e624 2707
5fafdf24 2708 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2709 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2710 /* I/O case */
2711 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2712 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2713 } else {
2714 /* RAM case */
5fafdf24 2715 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2716 (addr & ~TARGET_PAGE_MASK);
2717 val = ldl_p(ptr);
2718 }
2719 return val;
2720}
2721
84b7b8e7
FB
2722/* warning: addr must be aligned */
2723uint64_t ldq_phys(target_phys_addr_t addr)
2724{
2725 int io_index;
2726 uint8_t *ptr;
2727 uint64_t val;
2728 unsigned long pd;
2729 PhysPageDesc *p;
2730
2731 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2732 if (!p) {
2733 pd = IO_MEM_UNASSIGNED;
2734 } else {
2735 pd = p->phys_offset;
2736 }
3b46e624 2737
2a4188a3
FB
2738 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2739 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2740 /* I/O case */
2741 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2742#ifdef TARGET_WORDS_BIGENDIAN
2743 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2744 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2745#else
2746 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2747 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2748#endif
2749 } else {
2750 /* RAM case */
5fafdf24 2751 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2752 (addr & ~TARGET_PAGE_MASK);
2753 val = ldq_p(ptr);
2754 }
2755 return val;
2756}
2757
aab33094
FB
2758/* XXX: optimize */
2759uint32_t ldub_phys(target_phys_addr_t addr)
2760{
2761 uint8_t val;
2762 cpu_physical_memory_read(addr, &val, 1);
2763 return val;
2764}
2765
2766/* XXX: optimize */
2767uint32_t lduw_phys(target_phys_addr_t addr)
2768{
2769 uint16_t val;
2770 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2771 return tswap16(val);
2772}
2773
8df1cd07
FB
2774/* warning: addr must be aligned. The RAM page is not marked as dirty
2775 and the code inside is not invalidated. It is useful if the dirty
2776 bits are used to track modified PTEs */
2777void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2778{
2779 int io_index;
2780 uint8_t *ptr;
2781 unsigned long pd;
2782 PhysPageDesc *p;
2783
2784 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2785 if (!p) {
2786 pd = IO_MEM_UNASSIGNED;
2787 } else {
2788 pd = p->phys_offset;
2789 }
3b46e624 2790
3a7d929e 2791 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2792 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2793 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2794 } else {
5fafdf24 2795 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2796 (addr & ~TARGET_PAGE_MASK);
2797 stl_p(ptr, val);
2798 }
2799}
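
/* Illustrative sketch, not part of exec.c: the typical user of
   stl_phys_notdirty() is a target MMU walker updating accessed/dirty bits
   in a guest page table entry.  The bit masks below are the x86 values and
   serve only as an example. */
static void example_touch_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    uint32_t new_pte = pte | 0x20 | (is_write ? 0x40 : 0);   /* A, D bits */

    if (new_pte != pte) {
        /* the RAM page holding the PTE is intentionally not marked dirty,
           so dirty bits can keep tracking PTE modifications as described
           in the comment above stl_phys_notdirty() */
        stl_phys_notdirty(pte_addr, new_pte);
    }
}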
2800
bc98a7ef
JM
2801void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2802{
2803 int io_index;
2804 uint8_t *ptr;
2805 unsigned long pd;
2806 PhysPageDesc *p;
2807
2808 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2809 if (!p) {
2810 pd = IO_MEM_UNASSIGNED;
2811 } else {
2812 pd = p->phys_offset;
2813 }
3b46e624 2814
bc98a7ef
JM
2815 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2816 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2817#ifdef TARGET_WORDS_BIGENDIAN
2818 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2819 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2820#else
2821 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2823#endif
2824 } else {
5fafdf24 2825 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2826 (addr & ~TARGET_PAGE_MASK);
2827 stq_p(ptr, val);
2828 }
2829}
2830
8df1cd07 2831/* warning: addr must be aligned */
8df1cd07
FB
2832void stl_phys(target_phys_addr_t addr, uint32_t val)
2833{
2834 int io_index;
2835 uint8_t *ptr;
2836 unsigned long pd;
2837 PhysPageDesc *p;
2838
2839 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2840 if (!p) {
2841 pd = IO_MEM_UNASSIGNED;
2842 } else {
2843 pd = p->phys_offset;
2844 }
3b46e624 2845
3a7d929e 2846 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2847 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2848 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2849 } else {
2850 unsigned long addr1;
2851 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2852 /* RAM case */
2853 ptr = phys_ram_base + addr1;
2854 stl_p(ptr, val);
3a7d929e
FB
2855 if (!cpu_physical_memory_is_dirty(addr1)) {
2856 /* invalidate code */
2857 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2858 /* set dirty bit */
f23db169
FB
2859 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2860 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2861 }
8df1cd07
FB
2862 }
2863}
2864
aab33094
FB
2865/* XXX: optimize */
2866void stb_phys(target_phys_addr_t addr, uint32_t val)
2867{
2868 uint8_t v = val;
2869 cpu_physical_memory_write(addr, &v, 1);
2870}
2871
2872/* XXX: optimize */
2873void stw_phys(target_phys_addr_t addr, uint32_t val)
2874{
2875 uint16_t v = tswap16(val);
2876 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2877}
2878
2879/* XXX: optimize */
2880void stq_phys(target_phys_addr_t addr, uint64_t val)
2881{
2882 val = tswap64(val);
2883 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2884}
2885
13eb76e0
FB
2886#endif
2887
2888/* virtual memory access for debug */
5fafdf24 2889int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2890 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2891{
2892 int l;
9b3c35e0
JM
2893 target_phys_addr_t phys_addr;
2894 target_ulong page;
13eb76e0
FB
2895
2896 while (len > 0) {
2897 page = addr & TARGET_PAGE_MASK;
2898 phys_addr = cpu_get_phys_page_debug(env, page);
2899 /* if no physical page mapped, return an error */
2900 if (phys_addr == -1)
2901 return -1;
2902 l = (page + TARGET_PAGE_SIZE) - addr;
2903 if (l > len)
2904 l = len;
5fafdf24 2905 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2906 buf, l, is_write);
13eb76e0
FB
2907 len -= l;
2908 buf += l;
2909 addr += l;
2910 }
2911 return 0;
2912}
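
/* Illustrative sketch, not part of exec.c: cpu_memory_rw_debug() is the
   entry point for debug consumers such as the gdb stub, because it
   resolves guest virtual addresses with cpu_get_phys_page_debug() instead
   of relying on TLB contents. */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* is_write = 0: copy guest virtual memory into buf; returns -1 if any
       page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}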
2913
e3db7226
FB
2914void dump_exec_info(FILE *f,
2915 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2916{
2917 int i, target_code_size, max_target_code_size;
2918 int direct_jmp_count, direct_jmp2_count, cross_page;
2919 TranslationBlock *tb;
3b46e624 2920
e3db7226
FB
2921 target_code_size = 0;
2922 max_target_code_size = 0;
2923 cross_page = 0;
2924 direct_jmp_count = 0;
2925 direct_jmp2_count = 0;
2926 for(i = 0; i < nb_tbs; i++) {
2927 tb = &tbs[i];
2928 target_code_size += tb->size;
2929 if (tb->size > max_target_code_size)
2930 max_target_code_size = tb->size;
2931 if (tb->page_addr[1] != -1)
2932 cross_page++;
2933 if (tb->tb_next_offset[0] != 0xffff) {
2934 direct_jmp_count++;
2935 if (tb->tb_next_offset[1] != 0xffff) {
2936 direct_jmp2_count++;
2937 }
2938 }
2939 }
2940 /* XXX: avoid using doubles ? */
57fec1fe 2941 cpu_fprintf(f, "Translation buffer state:\n");
e3db7226 2942 cpu_fprintf(f, "TB count %d\n", nb_tbs);
5fafdf24 2943 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2944 nb_tbs ? target_code_size / nb_tbs : 0,
2945 max_target_code_size);
5fafdf24 2946 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2947 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2948 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2949 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2950 cross_page,
e3db7226
FB
2951 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2952 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2953 direct_jmp_count,
e3db7226
FB
2954 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2955 direct_jmp2_count,
2956 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2957 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
2958 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2959 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2960 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
57fec1fe
FB
2961#ifdef CONFIG_PROFILER
2962 {
2963 int64_t tot;
2964 tot = dyngen_interm_time + dyngen_code_time;
2965 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2966 tot, tot / 2.4e9);
2967 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2968 dyngen_tb_count,
2969 dyngen_tb_count1 - dyngen_tb_count,
2970 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2971 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2972 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2973 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2974 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2975 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2976 dyngen_tb_count ?
2977 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2978 cpu_fprintf(f, "cycles/op %0.1f\n",
2979 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2980 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2981 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2982 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2983 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2984 if (tot == 0)
2985 tot = 1;
2986 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2987 (double)dyngen_interm_time / tot * 100.0);
2988 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2989 (double)dyngen_code_time / tot * 100.0);
2990 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2991 dyngen_restore_count);
2992 cpu_fprintf(f, " avg cycles %0.1f\n",
2993 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
2994 {
2995 extern void dump_op_count(void);
2996 dump_op_count();
2997 }
2998 }
2999#endif
e3db7226
FB
3000}
3001
5fafdf24 3002#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3003
3004#define MMUSUFFIX _cmmu
3005#define GETPC() NULL
3006#define env cpu_single_env
b769d8fe 3007#define SOFTMMU_CODE_ACCESS
61382a50
FB
3008
3009#define SHIFT 0
3010#include "softmmu_template.h"
3011
3012#define SHIFT 1
3013#include "softmmu_template.h"
3014
3015#define SHIFT 2
3016#include "softmmu_template.h"
3017
3018#define SHIFT 3
3019#include "softmmu_template.h"
3020
3021#undef env
3022
3023#endif