/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
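/* Page descriptors live in a two-level table: the top L1_BITS of a
   page index select an l1_map slot and the low L2_BITS select the
   entry inside an L2 array allocated on demand.  E.g. with 4 KB
   target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10, a 32-bit
   address splits into 10/10/12 bits. */
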
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
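    /* derive the log2 and mask of the host page size: e.g. a 4096
       byte host page yields qemu_host_page_bits == 12 and
       qemu_host_page_mask == ~0xfffUL */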
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

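/* TB list pointers carry a tag in their low two bits: in page_next[]
   the tag is the index (0 or 1) of the page the link belongs to, and
   in the jmp_first/jmp_next chains the value 2 marks the head of the
   circular list.  Masking with ~3 recovers the real pointer. */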
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
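
/* example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs 0xf8
   into tab[0] and 0x03 into tab[1] */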

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
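
/* note: tbs[] is filled in allocation order and the code generation
   buffer only grows, so tc_ptr values are monotonically increasing;
   the binary search above therefore returns the last TB whose tc_ptr
   is <= the given address */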

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
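
/* e.g. cpu_str_to_log_mask("in_asm,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT */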

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
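
/* note: addr and start are unsigned, so the single comparison
   (addr - start) < length above checks both bounds of the range
   [start, start + length[ */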
1338
3a7d929e 1339void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1340 int dirty_flags)
1ccde1cb
FB
1341{
1342 CPUState *env;
4f2ac237 1343 unsigned long length, start1;
0a962c02
FB
1344 int i, mask, len;
1345 uint8_t *p;
1ccde1cb
FB
1346
1347 start &= TARGET_PAGE_MASK;
1348 end = TARGET_PAGE_ALIGN(end);
1349
1350 length = end - start;
1351 if (length == 0)
1352 return;
0a962c02 1353 len = length >> TARGET_PAGE_BITS;
3a7d929e 1354#ifdef USE_KQEMU
6a00d601
FB
1355 /* XXX: should not depend on cpu context */
1356 env = first_cpu;
3a7d929e 1357 if (env->kqemu_enabled) {
f23db169
FB
1358 ram_addr_t addr;
1359 addr = start;
1360 for(i = 0; i < len; i++) {
1361 kqemu_set_notdirty(env, addr);
1362 addr += TARGET_PAGE_SIZE;
1363 }
3a7d929e
FB
1364 }
1365#endif
f23db169
FB
1366 mask = ~dirty_flags;
1367 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1368 for(i = 0; i < len; i++)
1369 p[i] &= mask;
1370
1ccde1cb
FB
1371 /* we modify the TLB cache so that the dirty bit will be set again
1372 when accessing the range */
59817ccb 1373 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1374 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1375 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1376 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1377 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1378 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1379 }
59817ccb
FB
1380
1381#if !defined(CONFIG_SOFTMMU)
1382 /* XXX: this is expensive */
1383 {
1384 VirtPageDesc *p;
1385 int j;
1386 target_ulong addr;
1387
1388 for(i = 0; i < L1_SIZE; i++) {
1389 p = l1_virt_map[i];
1390 if (p) {
1391 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1392 for(j = 0; j < L2_SIZE; j++) {
1393 if (p->valid_tag == virt_valid_tag &&
1394 p->phys_addr >= start && p->phys_addr < end &&
1395 (p->prot & PROT_WRITE)) {
1396 if (addr < MMAP_AREA_END) {
1397 mprotect((void *)addr, TARGET_PAGE_SIZE,
1398 p->prot & ~PROT_WRITE);
1399 }
1400 }
1401 addr += TARGET_PAGE_SIZE;
1402 p++;
1403 }
1404 }
1405 }
1406 }
1407#endif
1ccde1cb
FB
1408}
1409
3a7d929e
FB
1410static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1411{
1412 ram_addr_t ram_addr;
1413
84b7b8e7
FB
1414 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1415 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1416 tlb_entry->addend - (unsigned long)phys_ram_base;
1417 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1418 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1419 }
1420 }
1421}
1422
1423/* update the TLB according to the current state of the dirty bits */
1424void cpu_tlb_update_dirty(CPUState *env)
1425{
1426 int i;
1427 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1428 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1429 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1430 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1431}
1432
1ccde1cb 1433static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1434 unsigned long start)
1ccde1cb
FB
1435{
1436 unsigned long addr;
84b7b8e7
FB
1437 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1438 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1439 if (addr == start) {
84b7b8e7 1440 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1441 }
1442 }
1443}
1444
1445/* update the TLB corresponding to virtual page vaddr and phys addr
1446 addr so that it is no longer dirty */
6a00d601
FB
1447static inline void tlb_set_dirty(CPUState *env,
1448 unsigned long addr, target_ulong vaddr)
1ccde1cb 1449{
1ccde1cb
FB
1450 int i;
1451
1ccde1cb
FB
1452 addr &= TARGET_PAGE_MASK;
1453 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1454 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1455 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1456}
1457
59817ccb
FB
1458/* add a new TLB entry. At most one entry for a given virtual address
1459 is permitted. Return 0 if OK or 2 if the page could not be mapped
1460 (can only happen in non SOFTMMU mode for I/O pages or pages
1461 conflicting with the host address space). */
84b7b8e7
FB
1462int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1463 target_phys_addr_t paddr, int prot,
1464 int is_user, int is_softmmu)
9fa3e853 1465{
92e873b9 1466 PhysPageDesc *p;
4f2ac237 1467 unsigned long pd;
9fa3e853 1468 unsigned int index;
4f2ac237 1469 target_ulong address;
108c49b8 1470 target_phys_addr_t addend;
9fa3e853 1471 int ret;
84b7b8e7 1472 CPUTLBEntry *te;
9fa3e853 1473
92e873b9 1474 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1475 if (!p) {
1476 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1477 } else {
1478 pd = p->phys_offset;
9fa3e853
FB
1479 }
1480#if defined(DEBUG_TLB)
3a7d929e 1481 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1482 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1483#endif
1484
1485 ret = 0;
1486#if !defined(CONFIG_SOFTMMU)
1487 if (is_softmmu)
1488#endif
1489 {
2a4188a3 1490 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1491 /* IO memory case */
1492 address = vaddr | pd;
1493 addend = paddr;
1494 } else {
1495 /* standard memory */
1496 address = vaddr;
1497 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1498 }
1499
90f18422 1500 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1501 addend -= vaddr;
84b7b8e7
FB
1502 te = &env->tlb_table[is_user][index];
1503 te->addend = addend;
67b915a5 1504 if (prot & PAGE_READ) {
84b7b8e7
FB
1505 te->addr_read = address;
1506 } else {
1507 te->addr_read = -1;
1508 }
1509 if (prot & PAGE_EXEC) {
1510 te->addr_code = address;
9fa3e853 1511 } else {
84b7b8e7 1512 te->addr_code = -1;
9fa3e853 1513 }
67b915a5 1514 if (prot & PAGE_WRITE) {
856074ec
FB
1515 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1516 (pd & IO_MEM_ROMD)) {
1517 /* write access calls the I/O callback */
1518 te->addr_write = vaddr |
1519 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1520 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1521 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1522 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1523 } else {
84b7b8e7 1524 te->addr_write = address;
9fa3e853
FB
1525 }
1526 } else {
84b7b8e7 1527 te->addr_write = -1;
9fa3e853
FB
1528 }
1529 }
1530#if !defined(CONFIG_SOFTMMU)
1531 else {
1532 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1533 /* IO access: no mapping is done as it will be handled by the
1534 soft MMU */
1535 if (!(env->hflags & HF_SOFTMMU_MASK))
1536 ret = 2;
1537 } else {
1538 void *map_addr;
59817ccb
FB
1539
1540 if (vaddr >= MMAP_AREA_END) {
1541 ret = 2;
1542 } else {
1543 if (prot & PROT_WRITE) {
1544 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1545#if defined(TARGET_HAS_SMC) || 1
59817ccb 1546 first_tb ||
d720b93d 1547#endif
59817ccb
FB
1548 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1549 !cpu_physical_memory_is_dirty(pd))) {
1550 /* ROM: we do as if code was inside */
1551 /* if code is present, we only map as read only and save the
1552 original mapping */
1553 VirtPageDesc *vp;
1554
90f18422 1555 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1556 vp->phys_addr = pd;
1557 vp->prot = prot;
1558 vp->valid_tag = virt_valid_tag;
1559 prot &= ~PAGE_WRITE;
1560 }
1561 }
1562 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1563 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1564 if (map_addr == MAP_FAILED) {
1565 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1566 paddr, vaddr);
9fa3e853 1567 }
9fa3e853
FB
1568 }
1569 }
1570 }
1571#endif
1572 return ret;
1573}
1574
1575/* called from signal handler: invalidate the code and unprotect the
1576 page. Return TRUE if the fault was succesfully handled. */
53a5960a 1577int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1578{
1579#if !defined(CONFIG_SOFTMMU)
1580 VirtPageDesc *vp;
1581
1582#if defined(DEBUG_TLB)
1583 printf("page_unprotect: addr=0x%08x\n", addr);
1584#endif
1585 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1586
1587 /* if it is not mapped, no need to worry here */
1588 if (addr >= MMAP_AREA_END)
1589 return 0;
9fa3e853
FB
1590 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1591 if (!vp)
1592 return 0;
1593 /* NOTE: in this case, validate_tag is _not_ tested as it
1594 validates only the code TLB */
1595 if (vp->valid_tag != virt_valid_tag)
1596 return 0;
1597 if (!(vp->prot & PAGE_WRITE))
1598 return 0;
1599#if defined(DEBUG_TLB)
1600 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1601 addr, vp->phys_addr, vp->prot);
1602#endif
59817ccb
FB
1603 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1604 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1605 (unsigned long)addr, vp->prot);
d720b93d 1606 /* set the dirty bit */
0a962c02 1607 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1608 /* flush the code inside */
1609 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1610 return 1;
1611#else
1612 return 0;
1613#endif
33417e70
FB
1614}
1615
0124311e
FB
1616#else
1617
ee8b7021 1618void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1619{
1620}
1621
2e12669a 1622void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1623{
1624}
1625
84b7b8e7
FB
1626int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1627 target_phys_addr_t paddr, int prot,
1628 int is_user, int is_softmmu)
9fa3e853
FB
1629{
1630 return 0;
1631}
0124311e 1632
9fa3e853
FB
1633/* dump memory mappings */
1634void page_dump(FILE *f)
33417e70 1635{
9fa3e853
FB
1636 unsigned long start, end;
1637 int i, j, prot, prot1;
1638 PageDesc *p;
33417e70 1639
9fa3e853
FB
1640 fprintf(f, "%-8s %-8s %-8s %s\n",
1641 "start", "end", "size", "prot");
1642 start = -1;
1643 end = -1;
1644 prot = 0;
1645 for(i = 0; i <= L1_SIZE; i++) {
1646 if (i < L1_SIZE)
1647 p = l1_map[i];
1648 else
1649 p = NULL;
1650 for(j = 0;j < L2_SIZE; j++) {
1651 if (!p)
1652 prot1 = 0;
1653 else
1654 prot1 = p[j].flags;
1655 if (prot1 != prot) {
1656 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1657 if (start != -1) {
1658 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1659 start, end, end - start,
1660 prot & PAGE_READ ? 'r' : '-',
1661 prot & PAGE_WRITE ? 'w' : '-',
1662 prot & PAGE_EXEC ? 'x' : '-');
1663 }
1664 if (prot1 != 0)
1665 start = end;
1666 else
1667 start = -1;
1668 prot = prot1;
1669 }
1670 if (!p)
1671 break;
1672 }
33417e70 1673 }
33417e70
FB
1674}
1675
53a5960a 1676int page_get_flags(target_ulong address)
33417e70 1677{
9fa3e853
FB
1678 PageDesc *p;
1679
1680 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1681 if (!p)
9fa3e853
FB
1682 return 0;
1683 return p->flags;
1684}
1685
1686/* modify the flags of a page and invalidate the code if
1687 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1688 depending on PAGE_WRITE */
53a5960a 1689void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1690{
1691 PageDesc *p;
53a5960a 1692 target_ulong addr;
9fa3e853
FB
1693
1694 start = start & TARGET_PAGE_MASK;
1695 end = TARGET_PAGE_ALIGN(end);
1696 if (flags & PAGE_WRITE)
1697 flags |= PAGE_WRITE_ORG;
1698 spin_lock(&tb_lock);
1699 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1700 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1701 /* if the write protection is set, then we invalidate the code
1702 inside */
1703 if (!(p->flags & PAGE_WRITE) &&
1704 (flags & PAGE_WRITE) &&
1705 p->first_tb) {
d720b93d 1706 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1707 }
1708 p->flags = flags;
1709 }
1710 spin_unlock(&tb_lock);
33417e70
FB
1711}
1712
9fa3e853
FB
1713/* called from signal handler: invalidate the code and unprotect the
1714 page. Return TRUE if the fault was succesfully handled. */
53a5960a 1715int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1716{
1717 unsigned int page_index, prot, pindex;
1718 PageDesc *p, *p1;
53a5960a 1719 target_ulong host_start, host_end, addr;
9fa3e853 1720
83fb7adf 1721 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1722 page_index = host_start >> TARGET_PAGE_BITS;
1723 p1 = page_find(page_index);
1724 if (!p1)
1725 return 0;
83fb7adf 1726 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1727 p = p1;
1728 prot = 0;
1729 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1730 prot |= p->flags;
1731 p++;
1732 }
1733 /* if the page was really writable, then we change its
1734 protection back to writable */
1735 if (prot & PAGE_WRITE_ORG) {
1736 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1737 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1738 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1739 (prot & PAGE_BITS) | PAGE_WRITE);
1740 p1[pindex].flags |= PAGE_WRITE;
1741 /* and since the content will be modified, we must invalidate
1742 the corresponding translated code. */
d720b93d 1743 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1744#ifdef DEBUG_TB_CHECK
1745 tb_invalidate_check(address);
1746#endif
1747 return 1;
1748 }
1749 }
1750 return 0;
1751}
1752
1753/* call this function when system calls directly modify a memory area */
53a5960a
PB
1754/* ??? This should be redundant now we have lock_user. */
1755void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1756{
53a5960a 1757 target_ulong start, end, addr;
9fa3e853 1758
53a5960a 1759 start = data;
9fa3e853
FB
1760 end = start + data_size;
1761 start &= TARGET_PAGE_MASK;
1762 end = TARGET_PAGE_ALIGN(end);
1763 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1764 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1765 }
1766}
1767
6a00d601
FB
1768static inline void tlb_set_dirty(CPUState *env,
1769 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1770{
1771}
9fa3e853
FB
1772#endif /* defined(CONFIG_USER_ONLY) */
1773
33417e70
FB
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
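
/* Illustrative sketch (made-up layout, not from the original tree):
   how a machine init function uses this API. The low bits of
   phys_offset encode the page type: IO_MEM_RAM / IO_MEM_ROM, or an
   io_index returned by cpu_register_io_memory(). */
static void example_board_init(unsigned long ram_size,
                               unsigned long bios_size)
{
    /* RAM at physical address 0: phys_offset is the byte offset of
       the backing pages inside phys_ram_base */
    cpu_register_physical_memory(0x00000000, ram_size, 0 | IO_MEM_RAM);
    /* BIOS ROM just above it: RAM-backed pages that the core keeps
       write-protected */
    cpu_register_physical_memory(ram_size, bios_size,
                                 ram_size | IO_MEM_ROM);
}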

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
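
/* Illustrative sketch of the consumer side of this dirty tracking
   (in the spirit of hw/vga.c, heavily simplified; redraw_page is a
   made-up name): repaint only the framebuffer pages whose VGA dirty
   bit is set, then reset the bits, which also re-arms the notdirty
   write callback via the TLB. */
static void example_update_display(unsigned long fb_start,
                                   unsigned long fb_size)
{
    unsigned long a;

    for (a = fb_start; a < fb_start + fb_size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* redraw_page(a); */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size,
                                    VGA_DIRTY_FLAG);
}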

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
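
/* Illustrative sketch (the device is made up): registering a trivial
   MMIO device and mapping it into the physical address space. All
   three access widths must be supplied, even if they share an
   implementation. */
typedef struct DemoState {
    uint32_t status;
} DemoState;

static uint32_t demo_mem_read(void *opaque, target_phys_addr_t addr)
{
    DemoState *s = opaque;
    return s->status;
}

static void demo_mem_write(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    DemoState *s = opaque;
    s->status = val;
}

static CPUReadMemoryFunc *demo_mem_readfn[3] = {
    demo_mem_read, demo_mem_read, demo_mem_read,    /* b, w, l */
};

static CPUWriteMemoryFunc *demo_mem_writefn[3] = {
    demo_mem_write, demo_mem_write, demo_mem_write,
};

static void demo_device_init(target_phys_addr_t base, DemoState *s)
{
    int io;

    /* io_index 0 asks for a fresh io zone; the return value already
       has IO_MEM_SHIFT applied and can be used as a phys_offset */
    io = cpu_register_io_memory(0, demo_mem_readfn, demo_mem_writefn, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}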

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        /* note: lock only the chunk that fits in the current page */
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
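
/* Usage sketch (made-up descriptor layout): a DMA-style device
   pulling a command block out of guest RAM and writing back a status
   word through the cpu_physical_memory_read/write wrappers. Note the
   raw memcpy semantics: multi-byte fields arrive in guest memory
   order, so real code would typically use ldl_phys()/stl_phys() for
   endian-safe field access. */
struct demo_dma_desc {
    uint32_t src, dst, len, status;
};

static void demo_dma_run(target_phys_addr_t desc_addr)
{
    struct demo_dma_desc d;

    cpu_physical_memory_read(desc_addr, (uint8_t *)&d, sizeof(d));
    /* ... perform the transfer described by d ... */
    d.status = 1;
    cpu_physical_memory_write(desc_addr, (const uint8_t *)&d, sizeof(d));
}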

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
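
/* Usage sketch (address and names are made up): firmware loading is
   the one caller that may legitimately write into an IO_MEM_ROM
   region, which cpu_physical_memory_rw() would silently skip. */
static void demo_load_firmware(const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, image, image_size);
}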

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
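
/* Illustrative sketch of why the notdirty variant exists: a target
   page-table walker setting the accessed bit in a guest PTE. Using
   stl_phys() here would dirty the page and defeat dirty-bit-based
   PTE tracking. 0x20 matches the x86 PG_ACCESSED bit; the helper
   name is made up. */
static void demo_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & 0x20)) {
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}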

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
2287
aab33094
FB
2288/* XXX: optimize */
2289void stb_phys(target_phys_addr_t addr, uint32_t val)
2290{
2291 uint8_t v = val;
2292 cpu_physical_memory_write(addr, &v, 1);
2293}
2294
2295/* XXX: optimize */
2296void stw_phys(target_phys_addr_t addr, uint32_t val)
2297{
2298 uint16_t v = tswap16(val);
2299 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2300}
2301
2302/* XXX: optimize */
2303void stq_phys(target_phys_addr_t addr, uint64_t val)
2304{
2305 val = tswap64(val);
2306 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2307}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
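
/* Usage sketch (in the spirit of gdbstub.c, simplified): a debugger
   stub reading guest virtual memory through the target MMU. */
static int demo_gdb_read_mem(CPUState *env, target_ulong vaddr,
                             uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) != 0)
        return -1; /* some page in the range was unmapped */
    return len;
}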

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
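
/* Usage sketch: this is essentially what the monitor's "info jit"
   command prints; plain fprintf matches the cpu_fprintf callback
   signature. */
static void demo_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}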

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif