/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

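/* Illustrative sketch (not part of the original file): how a target address
   is split into the two indices of the two-level page table (l1_map, declared
   below).  The helper name is hypothetical; the real lookups are open-coded
   in page_find() and page_find_alloc(). */
#if 0
static inline PageDesc *example_page_lookup(target_ulong addr)
{
    unsigned int page_index = addr >> TARGET_PAGE_BITS;   /* strip the page offset */
    PageDesc *l2 = l1_map[page_index >> L2_BITS];          /* top L1_BITS bits select the level-1 slot */
    if (!l2)
        return NULL;                                        /* level-2 table not allocated yet */
    return l2 + (page_index & (L2_SIZE - 1));               /* low L2_BITS bits index the level-2 table */
}
#endif
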
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
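
/* Illustrative sketch (not part of the original file): once page_init() has
   filled in qemu_host_page_size and qemu_host_page_mask, rounding an
   arbitrary range to host page boundaries is a simple masking operation.
   The helper is hypothetical; tb_alloc_page() and page_unprotect() below use
   the same pattern inline. */
#if 0
static void example_round_to_host_pages(unsigned long addr, unsigned long len,
                                        unsigned long *start, unsigned long *size)
{
    unsigned long first = addr & qemu_host_page_mask;                                  /* round down */
    unsigned long last = (addr + len + qemu_host_page_size - 1) & qemu_host_page_mask; /* round up */
    *start = first;
    *size = last - first;
}
#endif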

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
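
/* Illustrative sketch (not part of the original file): a typical caller such
   as tlb_set_page_exec() further down resolves a guest physical page like
   this (the helper name is hypothetical). */
#if 0
static uint32_t example_phys_offset_of(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;   /* page was never registered */
    return p->phys_offset;          /* RAM offset, or io_index in the low bits */
}
#endif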

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
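
/* Illustrative sketch (not part of the original file): cpu_exec_init()
   appends each new CPUState to the singly linked list rooted at first_cpu,
   so visiting every CPU is simply a walk of that list (the helper name is
   hypothetical). */
#if 0
static void example_for_each_cpu(void (*fn)(CPUState *))
{
    CPUState *env;
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        fn(env);    /* CPUs are visited in cpu_index order */
}
#endif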

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
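
/* Illustrative sketch (not part of the original file): the page_next[] and
   jmp_first/jmp_next[] lists handled above store a small tag in the two low
   bits of each pointer (0 or 1 = which page/jump slot of the TB the link
   belongs to, 2 = end of the circular jump list).  Decoding an entry looks
   like this; the helper name is hypothetical. */
#if 0
static inline TranslationBlock *example_untag(TranslationBlock *tagged, unsigned int *n)
{
    *n = (long)tagged & 3;                              /* slot index or end-of-list marker */
    return (TranslationBlock *)((long)tagged & ~3);     /* the real pointer */
}
#endif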

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
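
/* Worked example (not part of the original file): set_bits(tab, 3, 7) marks
   bits 3..9.  The first byte receives (0xff << 3) restricted to the byte,
   i.e. bits 3..7, and the second byte receives ~(0xff << 2) = 0x03, i.e.
   bits 8..9.  build_page_bitmap() below relies on this to record which
   byte offsets of a page are covered by translated code. */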

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
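
/* Usage note (not part of the original file): tb_find_pc() works because TBs
   are carved out of code_gen_buffer in strictly increasing order, so
   tbs[0..nb_tbs-1] is sorted by tc_ptr.  A caller that faulted at a host PC
   inside generated code can recover the owning TB roughly like this
   ('fault_host_pc' is a hypothetical variable): */
#if 0
    TranslationBlock *tb = tb_find_pc((unsigned long)fault_host_pc);
    if (tb)
        cpu_restore_state(tb, env, (unsigned long)fault_host_pc, NULL);
#endif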

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
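
/* Usage sketch (not part of the original file): a debugger front end such as
   the gdb stub would drive these helpers roughly as follows; 'data_addr' is
   a hypothetical variable, and the return convention is 0 (or a positive
   watchpoint index) on success and -1 when the fixed-size tables are full. */
#if 0
    if (cpu_breakpoint_insert(env, pc) < 0)
        return -1;                              /* MAX_BREAKPOINTS reached */
    cpu_watchpoint_insert(env, data_addr);      /* watch a data address */
    cpu_breakpoint_remove(env, pc);
    cpu_watchpoint_remove(env, data_addr);
#endif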

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
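
/* Worked example (not part of the original file): cpu_str_to_log_mask("in_asm,exec")
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" selects every entry of
   cpu_log_items, and an unknown name makes the whole call return 0.  A
   typical caller (e.g. a -d command line handler) might look like this
   ('optarg' is whatever the option parser provides): */
#if 0
    int mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("unknown logging category\n");
        exit(1);
    }
    cpu_set_log(mask);
#endif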

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
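
/* Illustrative sketch (not part of the original file): tlb_flush_page()
   above and tlb_set_page_exec() below index the software TLB with the same
   direct-mapped scheme, so a given guest virtual address always lands in
   exactly one slot per MMU mode: */
#if 0
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[is_user][index];   /* one table per MMU mode */
#endif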

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
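
/* Illustrative sketch (not part of the original file): the per-page dirty
   state manipulated above lives in the phys_ram_dirty byte array, one byte
   of flag bits per target page, so the usual test/set pattern in callers of
   this code is: */
#if 0
    if (!(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG)) {
        /* the page is still write-protected for self-modifying-code detection */
    }
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = 0xff;   /* mark every flag dirty */
#endif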

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1799
53a5960a 1800int page_get_flags(target_ulong address)
33417e70 1801{
9fa3e853
FB
1802 PageDesc *p;
1803
1804 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1805 if (!p)
9fa3e853
FB
1806 return 0;
1807 return p->flags;
1808}
1809
1810/* modify the flags of a page and invalidate the code if
1811 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1812 depending on PAGE_WRITE */
53a5960a 1813void page_set_flags(target_ulong start, target_ulong end, int flags)
1814{
1815 PageDesc *p;
53a5960a 1816 target_ulong addr;
1817
1818 start = start & TARGET_PAGE_MASK;
1819 end = TARGET_PAGE_ALIGN(end);
1820 if (flags & PAGE_WRITE)
1821 flags |= PAGE_WRITE_ORG;
1822 spin_lock(&tb_lock);
1823 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1824 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
 1825         /* if the page was write protected before and becomes writable
 1826            again, we invalidate any code translated from it */
1827 if (!(p->flags & PAGE_WRITE) &&
1828 (flags & PAGE_WRITE) &&
1829 p->first_tb) {
d720b93d 1830 tb_invalidate_phys_page(addr, 0, NULL);
1831 }
1832 p->flags = flags;
1833 }
1834 spin_unlock(&tb_lock);
1835}
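/* Illustrative sketch (not from this file): the user-mode mprotect emulation
   typically funnels into page_set_flags(), so that a page which becomes
   writable again gets its translated code invalidated.  The helper name and
   the minimal prot translation below are invented for the example. */
#if 0
static int example_target_mprotect(target_ulong start, target_ulong len, int prot)
{
    int page_flags = PAGE_VALID;

    if (prot & PROT_READ)
        page_flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        page_flags |= PAGE_WRITE;
    if (prot & PROT_EXEC)
        page_flags |= PAGE_EXEC;
    /* page_set_flags() page-aligns the range and handles PAGE_WRITE_ORG itself */
    page_set_flags(start, start + len, page_flags);
    return 0;
}
#endif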
1836
1837/* called from signal handler: invalidate the code and unprotect the
 1838    page. Return TRUE if the fault was successfully handled. */
53a5960a 1839int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1840{
1841 unsigned int page_index, prot, pindex;
1842 PageDesc *p, *p1;
53a5960a 1843 target_ulong host_start, host_end, addr;
9fa3e853 1844
83fb7adf 1845 host_start = address & qemu_host_page_mask;
1846 page_index = host_start >> TARGET_PAGE_BITS;
1847 p1 = page_find(page_index);
1848 if (!p1)
1849 return 0;
83fb7adf 1850 host_end = host_start + qemu_host_page_size;
1851 p = p1;
1852 prot = 0;
1853 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1854 prot |= p->flags;
1855 p++;
1856 }
1857 /* if the page was really writable, then we change its
1858 protection back to writable */
1859 if (prot & PAGE_WRITE_ORG) {
1860 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1861 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1862 mprotect((void *)g2h(host_start), qemu_host_page_size,
1863 (prot & PAGE_BITS) | PAGE_WRITE);
1864 p1[pindex].flags |= PAGE_WRITE;
1865 /* and since the content will be modified, we must invalidate
1866 the corresponding translated code. */
d720b93d 1867 tb_invalidate_phys_page(address, pc, puc);
1868#ifdef DEBUG_TB_CHECK
1869 tb_invalidate_check(address);
1870#endif
1871 return 1;
1872 }
1873 }
1874 return 0;
1875}
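/* Illustrative sketch (not from this file): how a host SIGSEGV handler
   cooperates with page_unprotect().  In the real tree this lives in the
   per-host cpu_signal_handler() glue; only the calling convention (host
   fault address, faulting PC, signal context) is shown, and the wrapper
   name is invented. */
#if 0
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *sigcontext)
{
    /* returns 1 if the fault came from our own write protection and has
       been fixed up, 0 if it must be reported as a real guest fault */
    return page_unprotect(h2g(host_addr), pc, sigcontext);
}
#endif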
1876
1877/* call this function when system calls directly modify a memory area */
1878/* ??? This should be redundant now we have lock_user. */
1879void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1880{
53a5960a 1881 target_ulong start, end, addr;
9fa3e853 1882
53a5960a 1883 start = data;
1884 end = start + data_size;
1885 start &= TARGET_PAGE_MASK;
1886 end = TARGET_PAGE_ALIGN(end);
1887 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1888 page_unprotect(addr, 0, NULL);
1889 }
1890}
1891
1892static inline void tlb_set_dirty(CPUState *env,
1893 unsigned long addr, target_ulong vaddr)
1894{
1895}
1896#endif /* defined(CONFIG_USER_ONLY) */
1897
1898/* register physical memory. 'size' must be a multiple of the target
1899 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1900 io memory page */
1901void cpu_register_physical_memory(target_phys_addr_t start_addr,
1902 unsigned long size,
1903 unsigned long phys_offset)
33417e70 1904{
108c49b8 1905 target_phys_addr_t addr, end_addr;
92e873b9 1906 PhysPageDesc *p;
9d42037b 1907 CPUState *env;
33417e70 1908
5fd386f6 1909 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1910 end_addr = start_addr + size;
5fd386f6 1911 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1912 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1913 p->phys_offset = phys_offset;
1914 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1915 (phys_offset & IO_MEM_ROMD))
1916 phys_offset += TARGET_PAGE_SIZE;
1917 }
1918
1919 /* since each CPU stores ram addresses in its TLB cache, we must
1920 reset the modified entries */
1921 /* XXX: slow ! */
1922 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1923 tlb_flush(env, 1);
1924 }
1925}
1926
1927/* XXX: temporary until new memory mapping API */
1928uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1929{
1930 PhysPageDesc *p;
1931
1932 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1933 if (!p)
1934 return IO_MEM_UNASSIGNED;
1935 return p->phys_offset;
1936}
1937
1938/* XXX: better than nothing */
1939ram_addr_t qemu_ram_alloc(unsigned int size)
1940{
1941 ram_addr_t addr;
1942 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1943 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1944 size, phys_ram_size);
1945 abort();
1946 }
1947 addr = phys_ram_alloc_offset;
1948 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1949 return addr;
1950}
1951
1952void qemu_ram_free(ram_addr_t addr)
1953{
1954}
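/* Illustrative sketch (not from this file): how board code typically pairs
   qemu_ram_alloc() with cpu_register_physical_memory().  RAM pages keep
   (phys_offset & ~TARGET_PAGE_MASK) == 0, so the allocator's return value
   can be passed through directly; the base address and size are invented. */
#if 0
static void example_map_main_ram(void)
{
    const target_phys_addr_t base = 0x00000000;
    const unsigned long size = 8 * 1024 * 1024;   /* 8 MB, illustrative */
    ram_addr_t offset;

    offset = qemu_ram_alloc(size);                /* carve out host RAM */
    cpu_register_physical_memory(base, size, offset | IO_MEM_RAM);
}
#endif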
1955
a4193c8a 1956static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1957{
1958#ifdef DEBUG_UNASSIGNED
1959 printf("Unassigned mem read 0x%08x\n", (int)addr);
1960#endif
1961 return 0;
1962}
1963
a4193c8a 1964static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1965{
1966#ifdef DEBUG_UNASSIGNED
1967 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1968#endif
1969}
1970
1971static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1972 unassigned_mem_readb,
1973 unassigned_mem_readb,
1974 unassigned_mem_readb,
1975};
1976
1977static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1978 unassigned_mem_writeb,
1979 unassigned_mem_writeb,
1980 unassigned_mem_writeb,
1981};
1982
3a7d929e 1983static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1984{
1985 unsigned long ram_addr;
1986 int dirty_flags;
1987 ram_addr = addr - (unsigned long)phys_ram_base;
1988 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1989 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1990#if !defined(CONFIG_USER_ONLY)
1991 tb_invalidate_phys_page_fast(ram_addr, 1);
1992 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1993#endif
3a7d929e 1994 }
c27004ec 1995 stb_p((uint8_t *)(long)addr, val);
1996#ifdef USE_KQEMU
1997 if (cpu_single_env->kqemu_enabled &&
1998 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1999 kqemu_modify_page(cpu_single_env, ram_addr);
2000#endif
2001 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2002 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2003 /* we remove the notdirty callback only if the code has been
2004 flushed */
2005 if (dirty_flags == 0xff)
6a00d601 2006 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2007}
2008
3a7d929e 2009static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2010{
2011 unsigned long ram_addr;
2012 int dirty_flags;
2013 ram_addr = addr - (unsigned long)phys_ram_base;
2014 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2015 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2016#if !defined(CONFIG_USER_ONLY)
2017 tb_invalidate_phys_page_fast(ram_addr, 2);
2018 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2019#endif
3a7d929e 2020 }
c27004ec 2021 stw_p((uint8_t *)(long)addr, val);
2022#ifdef USE_KQEMU
2023 if (cpu_single_env->kqemu_enabled &&
2024 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2025 kqemu_modify_page(cpu_single_env, ram_addr);
2026#endif
2027 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2028 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2029 /* we remove the notdirty callback only if the code has been
2030 flushed */
2031 if (dirty_flags == 0xff)
6a00d601 2032 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2033}
2034
3a7d929e 2035static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2036{
2037 unsigned long ram_addr;
2038 int dirty_flags;
2039 ram_addr = addr - (unsigned long)phys_ram_base;
2040 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2041 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2042#if !defined(CONFIG_USER_ONLY)
2043 tb_invalidate_phys_page_fast(ram_addr, 4);
2044 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2045#endif
3a7d929e 2046 }
c27004ec 2047 stl_p((uint8_t *)(long)addr, val);
2048#ifdef USE_KQEMU
2049 if (cpu_single_env->kqemu_enabled &&
2050 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2051 kqemu_modify_page(cpu_single_env, ram_addr);
2052#endif
2053 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2054 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2055 /* we remove the notdirty callback only if the code has been
2056 flushed */
2057 if (dirty_flags == 0xff)
6a00d601 2058 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2059}
2060
3a7d929e 2061static CPUReadMemoryFunc *error_mem_read[3] = {
2062 NULL, /* never used */
2063 NULL, /* never used */
2064 NULL, /* never used */
2065};
2066
2067static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2068 notdirty_mem_writeb,
2069 notdirty_mem_writew,
2070 notdirty_mem_writel,
2071};
2072
2073#if defined(CONFIG_SOFTMMU)
2074/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2075 so these check for a hit then pass through to the normal out-of-line
2076 phys routines. */
2077static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2078{
2079 return ldub_phys(addr);
2080}
2081
2082static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2083{
2084 return lduw_phys(addr);
2085}
2086
2087static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2088{
2089 return ldl_phys(addr);
2090}
2091
2092/* Generate a debug exception if a watchpoint has been hit.
2093 Returns the real physical address of the access. addr will be a host
2094 address in the is_ram case. */
2095static target_ulong check_watchpoint(target_phys_addr_t addr)
2096{
2097 CPUState *env = cpu_single_env;
2098 target_ulong watch;
2099 target_ulong retaddr;
2100 int i;
2101
2102 retaddr = addr;
2103 for (i = 0; i < env->nb_watchpoints; i++) {
2104 watch = env->watchpoint[i].vaddr;
2105 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2106 if (env->watchpoint[i].is_ram)
2107 retaddr = addr - (unsigned long)phys_ram_base;
2108 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2109 cpu_single_env->watchpoint_hit = i + 1;
2110 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2111 break;
2112 }
2113 }
2114 }
2115 return retaddr;
2116}
2117
2118static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2119 uint32_t val)
2120{
2121 addr = check_watchpoint(addr);
2122 stb_phys(addr, val);
2123}
2124
2125static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2126 uint32_t val)
2127{
2128 addr = check_watchpoint(addr);
2129 stw_phys(addr, val);
2130}
2131
2132static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2133 uint32_t val)
2134{
2135 addr = check_watchpoint(addr);
2136 stl_phys(addr, val);
2137}
2138
2139static CPUReadMemoryFunc *watch_mem_read[3] = {
2140 watch_mem_readb,
2141 watch_mem_readw,
2142 watch_mem_readl,
2143};
2144
2145static CPUWriteMemoryFunc *watch_mem_write[3] = {
2146 watch_mem_writeb,
2147 watch_mem_writew,
2148 watch_mem_writel,
2149};
2150#endif
2151
2152static void io_mem_init(void)
2153{
3a7d929e 2154 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2155 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2156 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2157 io_mem_nb = 5;
2158
2159#if defined(CONFIG_SOFTMMU)
2160 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2161 watch_mem_write, NULL);
2162#endif
1ccde1cb 2163 /* alloc dirty bits array */
0a962c02 2164 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2165 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2166}
2167
2168/* mem_read and mem_write are arrays of functions containing the
2169 function to access byte (index 0), word (index 1) and dword (index
 2170    2). All functions must be supplied. If io_index is non-zero, the
2171 corresponding io zone is modified. If it is zero, a new io zone is
2172 allocated. The return value can be used with
 2173    cpu_register_physical_memory(). (-1) is returned on error. */
2174int cpu_register_io_memory(int io_index,
2175 CPUReadMemoryFunc **mem_read,
2176 CPUWriteMemoryFunc **mem_write,
2177 void *opaque)
2178{
2179 int i;
2180
2181 if (io_index <= 0) {
b5ff1b31 2182 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2183 return -1;
2184 io_index = io_mem_nb++;
2185 } else {
2186 if (io_index >= IO_MEM_NB_ENTRIES)
2187 return -1;
2188 }
b5ff1b31 2189
2190 for(i = 0;i < 3; i++) {
2191 io_mem_read[io_index][i] = mem_read[i];
2192 io_mem_write[io_index][i] = mem_write[i];
2193 }
a4193c8a 2194 io_mem_opaque[io_index] = opaque;
2195 return io_index << IO_MEM_SHIFT;
2196}
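/* Illustrative sketch (not from this file): a device model registering MMIO
   callbacks and mapping them at a guest physical address.  The device state,
   handlers and the 0x10000000 base are invented; only the
   cpu_register_io_memory()/cpu_register_physical_memory() calls reflect the
   interface documented above. */
#if 0
typedef struct ExampleDevState { uint32_t reg; } ExampleDevState;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

/* one entry each for byte, word and dword accesses */
static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readl, example_dev_readl, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writel, example_dev_writel, example_dev_writel,
};

static void example_dev_init(ExampleDevState *s)
{
    int iomemtype;

    /* io_index 0 requests a fresh io zone; the result already carries IO_MEM_SHIFT */
    iomemtype = cpu_register_io_memory(0, example_dev_read, example_dev_write, s);
    cpu_register_physical_memory(0x10000000, 0x1000, iomemtype);
}
#endif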
61382a50 2197
2198CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2199{
2200 return io_mem_write[io_index >> IO_MEM_SHIFT];
2201}
2202
2203CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2204{
2205 return io_mem_read[io_index >> IO_MEM_SHIFT];
2206}
2207
2208/* physical memory access (slow version, mainly for debug) */
2209#if defined(CONFIG_USER_ONLY)
2e12669a 2210void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2211 int len, int is_write)
2212{
2213 int l, flags;
2214 target_ulong page;
53a5960a 2215 void * p;
2216
2217 while (len > 0) {
2218 page = addr & TARGET_PAGE_MASK;
2219 l = (page + TARGET_PAGE_SIZE) - addr;
2220 if (l > len)
2221 l = len;
2222 flags = page_get_flags(page);
2223 if (!(flags & PAGE_VALID))
2224 return;
2225 if (is_write) {
2226 if (!(flags & PAGE_WRITE))
2227 return;
2228 p = lock_user(addr, len, 0);
2229 memcpy(p, buf, len);
2230 unlock_user(p, addr, len);
2231 } else {
2232 if (!(flags & PAGE_READ))
2233 return;
2234 p = lock_user(addr, len, 1);
2235 memcpy(buf, p, len);
2236 unlock_user(p, addr, 0);
2237 }
2238 len -= l;
2239 buf += l;
2240 addr += l;
2241 }
2242}
8df1cd07 2243
13eb76e0 2244#else
2e12669a 2245void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2246 int len, int is_write)
2247{
2248 int l, io_index;
2249 uint8_t *ptr;
2250 uint32_t val;
2251 target_phys_addr_t page;
2252 unsigned long pd;
92e873b9 2253 PhysPageDesc *p;
2254
2255 while (len > 0) {
2256 page = addr & TARGET_PAGE_MASK;
2257 l = (page + TARGET_PAGE_SIZE) - addr;
2258 if (l > len)
2259 l = len;
92e873b9 2260 p = phys_page_find(page >> TARGET_PAGE_BITS);
2261 if (!p) {
2262 pd = IO_MEM_UNASSIGNED;
2263 } else {
2264 pd = p->phys_offset;
2265 }
2266
2267 if (is_write) {
3a7d929e 2268 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2269 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2270 /* XXX: could force cpu_single_env to NULL to avoid
2271 potential bugs */
13eb76e0 2272 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2273 /* 32 bit write access */
c27004ec 2274 val = ldl_p(buf);
a4193c8a 2275 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2276 l = 4;
2277 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2278 /* 16 bit write access */
c27004ec 2279 val = lduw_p(buf);
a4193c8a 2280 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2281 l = 2;
2282 } else {
1c213d19 2283 /* 8 bit write access */
c27004ec 2284 val = ldub_p(buf);
a4193c8a 2285 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2286 l = 1;
2287 }
2288 } else {
2289 unsigned long addr1;
2290 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2291 /* RAM case */
b448f2f3 2292 ptr = phys_ram_base + addr1;
13eb76e0 2293 memcpy(ptr, buf, l);
2294 if (!cpu_physical_memory_is_dirty(addr1)) {
2295 /* invalidate code */
2296 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2297 /* set dirty bit */
2298 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2299 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2300 }
2301 }
2302 } else {
2303 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2304 !(pd & IO_MEM_ROMD)) {
2305 /* I/O case */
2306 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2307 if (l >= 4 && ((addr & 3) == 0)) {
2308 /* 32 bit read access */
a4193c8a 2309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2310 stl_p(buf, val);
2311 l = 4;
2312 } else if (l >= 2 && ((addr & 1) == 0)) {
2313 /* 16 bit read access */
a4193c8a 2314 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2315 stw_p(buf, val);
2316 l = 2;
2317 } else {
1c213d19 2318 /* 8 bit read access */
a4193c8a 2319 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2320 stb_p(buf, val);
2321 l = 1;
2322 }
2323 } else {
2324 /* RAM case */
2325 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2326 (addr & ~TARGET_PAGE_MASK);
2327 memcpy(buf, ptr, l);
2328 }
2329 }
2330 len -= l;
2331 buf += l;
2332 addr += l;
2333 }
2334}
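/* Illustrative sketch (not from this file): how device emulation typically
   performs "DMA" through this interface.  cpu_physical_memory_read() and
   cpu_physical_memory_write() are the thin wrappers around
   cpu_physical_memory_rw(); the descriptor layout and the function itself
   are invented for the example. */
#if 0
static void example_dma_copy(target_phys_addr_t desc_addr)
{
    uint8_t tmp[64];
    uint32_t src, dst, len, chunk, done;

    /* a made-up three word descriptor: source, destination, length */
    src = ldl_phys(desc_addr);
    dst = ldl_phys(desc_addr + 4);
    len = ldl_phys(desc_addr + 8);

    for (done = 0; done < len; done += chunk) {
        chunk = len - done;
        if (chunk > sizeof(tmp))
            chunk = sizeof(tmp);
        cpu_physical_memory_read(src + done, tmp, chunk);
        cpu_physical_memory_write(dst + done, tmp, chunk);
    }
}
#endif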
8df1cd07 2335
2336/* used for ROM loading : can write in RAM and ROM */
2337void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2338 const uint8_t *buf, int len)
2339{
2340 int l;
2341 uint8_t *ptr;
2342 target_phys_addr_t page;
2343 unsigned long pd;
2344 PhysPageDesc *p;
2345
2346 while (len > 0) {
2347 page = addr & TARGET_PAGE_MASK;
2348 l = (page + TARGET_PAGE_SIZE) - addr;
2349 if (l > len)
2350 l = len;
2351 p = phys_page_find(page >> TARGET_PAGE_BITS);
2352 if (!p) {
2353 pd = IO_MEM_UNASSIGNED;
2354 } else {
2355 pd = p->phys_offset;
2356 }
2357
2358 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2359 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2360 !(pd & IO_MEM_ROMD)) {
2361 /* do nothing */
2362 } else {
2363 unsigned long addr1;
2364 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2365 /* ROM/RAM case */
2366 ptr = phys_ram_base + addr1;
2367 memcpy(ptr, buf, l);
2368 }
2369 len -= l;
2370 buf += l;
2371 addr += l;
2372 }
2373}
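/* Illustrative sketch (not from this file): a firmware loader writing a BIOS
   image into a region registered with an IO_MEM_ROM phys_offset.  A plain
   cpu_physical_memory_write() would be silently dropped there (ROM pages go
   to the unassigned write handler); the base address below is invented. */
#if 0
static void example_install_bios(const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(0xfffe0000, image, image_size);
}
#endif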
2374
2375
2376/* warning: addr must be aligned */
2377uint32_t ldl_phys(target_phys_addr_t addr)
2378{
2379 int io_index;
2380 uint8_t *ptr;
2381 uint32_t val;
2382 unsigned long pd;
2383 PhysPageDesc *p;
2384
2385 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2386 if (!p) {
2387 pd = IO_MEM_UNASSIGNED;
2388 } else {
2389 pd = p->phys_offset;
2390 }
2391
2392 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2393 !(pd & IO_MEM_ROMD)) {
2394 /* I/O case */
2395 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2396 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2397 } else {
2398 /* RAM case */
2399 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2400 (addr & ~TARGET_PAGE_MASK);
2401 val = ldl_p(ptr);
2402 }
2403 return val;
2404}
2405
2406/* warning: addr must be aligned */
2407uint64_t ldq_phys(target_phys_addr_t addr)
2408{
2409 int io_index;
2410 uint8_t *ptr;
2411 uint64_t val;
2412 unsigned long pd;
2413 PhysPageDesc *p;
2414
2415 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2416 if (!p) {
2417 pd = IO_MEM_UNASSIGNED;
2418 } else {
2419 pd = p->phys_offset;
2420 }
2421
2422 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2423 !(pd & IO_MEM_ROMD)) {
2424 /* I/O case */
2425 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2426#ifdef TARGET_WORDS_BIGENDIAN
2427 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2428 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2429#else
2430 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2431 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2432#endif
2433 } else {
2434 /* RAM case */
2435 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2436 (addr & ~TARGET_PAGE_MASK);
2437 val = ldq_p(ptr);
2438 }
2439 return val;
2440}
2441
2442/* XXX: optimize */
2443uint32_t ldub_phys(target_phys_addr_t addr)
2444{
2445 uint8_t val;
2446 cpu_physical_memory_read(addr, &val, 1);
2447 return val;
2448}
2449
2450/* XXX: optimize */
2451uint32_t lduw_phys(target_phys_addr_t addr)
2452{
2453 uint16_t val;
2454 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2455 return tswap16(val);
2456}
2457
 2458/* warning: addr must be aligned. The ram page is not marked as dirty
2459 and the code inside is not invalidated. It is useful if the dirty
2460 bits are used to track modified PTEs */
2461void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2462{
2463 int io_index;
2464 uint8_t *ptr;
2465 unsigned long pd;
2466 PhysPageDesc *p;
2467
2468 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2469 if (!p) {
2470 pd = IO_MEM_UNASSIGNED;
2471 } else {
2472 pd = p->phys_offset;
2473 }
2474
3a7d929e 2475 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2476 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2477 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2478 } else {
2479 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2480 (addr & ~TARGET_PAGE_MASK);
2481 stl_p(ptr, val);
2482 }
2483}
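/* Illustrative sketch (not from this file): the kind of caller the comment
   above has in mind.  A target MMU helper setting the accessed/dirty bits of
   a guest PTE wants the store to reach guest RAM without marking the page
   dirty on the QEMU side, since the dirty bitmap is being used to detect PTE
   changes.  The PTE bit values are invented. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20
#define EXAMPLE_PTE_DIRTY    0x40

static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= EXAMPLE_PTE_ACCESSED;
    if (is_write)
        pte |= EXAMPLE_PTE_DIRTY;
    stl_phys_notdirty(pte_addr, pte);
}
#endif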
2484
2485void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2486{
2487 int io_index;
2488 uint8_t *ptr;
2489 unsigned long pd;
2490 PhysPageDesc *p;
2491
2492 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2493 if (!p) {
2494 pd = IO_MEM_UNASSIGNED;
2495 } else {
2496 pd = p->phys_offset;
2497 }
2498
2499 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2500 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2501#ifdef TARGET_WORDS_BIGENDIAN
2502 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2503 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2504#else
2505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2506 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2507#endif
2508 } else {
2509 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2510 (addr & ~TARGET_PAGE_MASK);
2511 stq_p(ptr, val);
2512 }
2513}
2514
8df1cd07 2515/* warning: addr must be aligned */
2516void stl_phys(target_phys_addr_t addr, uint32_t val)
2517{
2518 int io_index;
2519 uint8_t *ptr;
2520 unsigned long pd;
2521 PhysPageDesc *p;
2522
2523 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2524 if (!p) {
2525 pd = IO_MEM_UNASSIGNED;
2526 } else {
2527 pd = p->phys_offset;
2528 }
2529
3a7d929e 2530 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2531 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2532 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2533 } else {
2534 unsigned long addr1;
2535 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2536 /* RAM case */
2537 ptr = phys_ram_base + addr1;
2538 stl_p(ptr, val);
2539 if (!cpu_physical_memory_is_dirty(addr1)) {
2540 /* invalidate code */
2541 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2542 /* set dirty bit */
2543 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2544 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2545 }
2546 }
2547}
2548
2549/* XXX: optimize */
2550void stb_phys(target_phys_addr_t addr, uint32_t val)
2551{
2552 uint8_t v = val;
2553 cpu_physical_memory_write(addr, &v, 1);
2554}
2555
2556/* XXX: optimize */
2557void stw_phys(target_phys_addr_t addr, uint32_t val)
2558{
2559 uint16_t v = tswap16(val);
2560 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2561}
2562
2563/* XXX: optimize */
2564void stq_phys(target_phys_addr_t addr, uint64_t val)
2565{
2566 val = tswap64(val);
2567 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2568}
2569
2570#endif
2571
2572/* virtual memory access for debug */
2573int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2574 uint8_t *buf, int len, int is_write)
2575{
2576 int l;
2577 target_ulong page, phys_addr;
2578
2579 while (len > 0) {
2580 page = addr & TARGET_PAGE_MASK;
2581 phys_addr = cpu_get_phys_page_debug(env, page);
2582 /* if no physical page mapped, return an error */
2583 if (phys_addr == -1)
2584 return -1;
2585 l = (page + TARGET_PAGE_SIZE) - addr;
2586 if (l > len)
2587 l = len;
2588 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2589 buf, l, is_write);
2590 len -= l;
2591 buf += l;
2592 addr += l;
2593 }
2594 return 0;
2595}
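/* Illustrative sketch (not from this file): cpu_memory_rw_debug() is the
   entry point a debugger front end such as the gdb stub uses, since it takes
   a guest virtual address and resolves the physical page itself.  The
   wrapper below is invented for the example. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;          /* no physical page mapped at vaddr */
    *out = ldl_p(buf);      /* interpret in target byte order */
    return 0;
}
#endif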
2596
2597void dump_exec_info(FILE *f,
2598 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2599{
2600 int i, target_code_size, max_target_code_size;
2601 int direct_jmp_count, direct_jmp2_count, cross_page;
2602 TranslationBlock *tb;
2603
2604 target_code_size = 0;
2605 max_target_code_size = 0;
2606 cross_page = 0;
2607 direct_jmp_count = 0;
2608 direct_jmp2_count = 0;
2609 for(i = 0; i < nb_tbs; i++) {
2610 tb = &tbs[i];
2611 target_code_size += tb->size;
2612 if (tb->size > max_target_code_size)
2613 max_target_code_size = tb->size;
2614 if (tb->page_addr[1] != -1)
2615 cross_page++;
2616 if (tb->tb_next_offset[0] != 0xffff) {
2617 direct_jmp_count++;
2618 if (tb->tb_next_offset[1] != 0xffff) {
2619 direct_jmp2_count++;
2620 }
2621 }
2622 }
2623 /* XXX: avoid using doubles ? */
2624 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2625 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2626 nb_tbs ? target_code_size / nb_tbs : 0,
2627 max_target_code_size);
2628 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2629 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2630 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2631 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2632 cross_page,
2633 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2634 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2635 direct_jmp_count,
2636 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2637 direct_jmp2_count,
2638 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2639 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2640 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2641 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2642}
2643
2644#if !defined(CONFIG_USER_ONLY)
2645
2646#define MMUSUFFIX _cmmu
2647#define GETPC() NULL
2648#define env cpu_single_env
b769d8fe 2649#define SOFTMMU_CODE_ACCESS
2650
2651#define SHIFT 0
2652#include "softmmu_template.h"
2653
2654#define SHIFT 1
2655#include "softmmu_template.h"
2656
2657#define SHIFT 2
2658#include "softmmu_template.h"
2659
2660#define SHIFT 3
2661#include "softmmu_template.h"
2662
2663#undef env
2664
2665#endif