54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c 25#include <sys/mman.h>
26#endif
54936004 27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181 35#include "cpu.h"
36#include "exec-all.h"
53a5960a 37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
67d3b957 44//#define DEBUG_UNASSIGNED
fd6ce8f6 45
46/* make various TB consistency checks */
47//#define DEBUG_TB_CHECK
98857888 48//#define DEBUG_TLB_CHECK
fd6ce8f6 49
1196be37 50//#define DEBUG_IOPORT
51
99773bd4 52#if !defined(CONFIG_USER_ONLY)
53/* TB consistency checks only implemented for usermode emulation. */
54#undef DEBUG_TB_CHECK
55#endif
56
fd6ce8f6 57/* threshold to flush the translated code buffer */
58#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
59
9fa3e853 60#define SMC_BITMAP_USE_THRESHOLD 10
61
62#define MMAP_AREA_START 0x00000000
63#define MMAP_AREA_END 0xa8000000
fd6ce8f6 64
108c49b8 65#if defined(TARGET_SPARC64)
66#define TARGET_PHYS_ADDR_SPACE_BITS 41
67#elif defined(TARGET_PPC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 42
69#else
70/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
71#define TARGET_PHYS_ADDR_SPACE_BITS 32
72#endif
73
fd6ce8f6 74TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 75TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 76int nb_tbs;
eb51d102 77/* any access to the tbs or the page table must use this lock */
78spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 79
b8076a74 80uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6 81uint8_t *code_gen_ptr;
82
9fa3e853 83int phys_ram_size;
84int phys_ram_fd;
85uint8_t *phys_ram_base;
1ccde1cb 86uint8_t *phys_ram_dirty;
e9a1ab19 87static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 88
6a00d601 89CPUState *first_cpu;
90/* current CPU in the current thread. It is only valid inside
91 cpu_exec() */
92CPUState *cpu_single_env;
93
54936004 94typedef struct PageDesc {
92e873b9 95 /* list of TBs intersecting this ram page */
fd6ce8f6 96 TranslationBlock *first_tb;
9fa3e853 97 /* in order to optimize self-modifying code handling, we count the number
 98 of writes to a given page so that a bitmap can be used above a threshold */
99 unsigned int code_write_count;
100 uint8_t *code_bitmap;
101#if defined(CONFIG_USER_ONLY)
102 unsigned long flags;
103#endif
54936004 104} PageDesc;
105
92e873b9 106typedef struct PhysPageDesc {
107 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 108 uint32_t phys_offset;
92e873b9 109} PhysPageDesc;
110
54936004 111#define L2_BITS 10
112#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
113
114#define L1_SIZE (1 << L1_BITS)
115#define L2_SIZE (1 << L2_BITS)
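/* The virtual page table is a two-level structure: l1_map holds L1_SIZE
   pointers to arrays of L2_SIZE PageDesc entries.  For a given target page
   index, the upper bits (index >> L2_BITS) select the first-level slot and
   the low L2_BITS bits select the entry inside the second-level array, as
   done in page_find()/page_find_alloc() below. */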
116
33417e70 117static void io_mem_init(void);
fd6ce8f6 118
83fb7adf 119unsigned long qemu_real_host_page_size;
120unsigned long qemu_host_page_bits;
121unsigned long qemu_host_page_size;
122unsigned long qemu_host_page_mask;
54936004 123
92e873b9 124/* XXX: for system emulation, it could just be an array */
54936004 125static PageDesc *l1_map[L1_SIZE];
0a962c02 126PhysPageDesc **l1_phys_map;
54936004 127
33417e70 128/* io memory support */
33417e70 129CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
130CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 131void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 132static int io_mem_nb;
6658ffb8 133#if defined(CONFIG_SOFTMMU)
134static int io_mem_watch;
135#endif
33417e70 136
34865134 137/* log support */
138char *logfilename = "/tmp/qemu.log";
139FILE *logfile;
140int loglevel;
141
e3db7226 142/* statistics */
143static int tlb_flush_count;
144static int tb_flush_count;
145static int tb_phys_invalidate_count;
146
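/* Determine the host page size, make the static code_gen_buffer executable,
   derive the qemu_host_page_* globals and allocate the first level of the
   physical page table. */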
b346ff46 147static void page_init(void)
54936004 148{
83fb7adf 149 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 150 TARGET_PAGE_SIZE */
67b915a5 151#ifdef _WIN32
d5a8f07c 152 {
153 SYSTEM_INFO system_info;
154 DWORD old_protect;
155
156 GetSystemInfo(&system_info);
157 qemu_real_host_page_size = system_info.dwPageSize;
158
159 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
160 PAGE_EXECUTE_READWRITE, &old_protect);
161 }
67b915a5 162#else
83fb7adf 163 qemu_real_host_page_size = getpagesize();
d5a8f07c 164 {
165 unsigned long start, end;
166
167 start = (unsigned long)code_gen_buffer;
168 start &= ~(qemu_real_host_page_size - 1);
169
170 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
171 end += qemu_real_host_page_size - 1;
172 end &= ~(qemu_real_host_page_size - 1);
173
174 mprotect((void *)start, end - start,
175 PROT_READ | PROT_WRITE | PROT_EXEC);
176 }
67b915a5 177#endif
d5a8f07c 178
83fb7adf 179 if (qemu_host_page_size == 0)
180 qemu_host_page_size = qemu_real_host_page_size;
181 if (qemu_host_page_size < TARGET_PAGE_SIZE)
182 qemu_host_page_size = TARGET_PAGE_SIZE;
183 qemu_host_page_bits = 0;
184 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
185 qemu_host_page_bits++;
186 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8 187 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
188 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004 189}
190
fd6ce8f6 191static inline PageDesc *page_find_alloc(unsigned int index)
54936004 192{
54936004
FB
193 PageDesc **lp, *p;
194
54936004
FB
195 lp = &l1_map[index >> L2_BITS];
196 p = *lp;
197 if (!p) {
198 /* allocate if not found */
59817ccb 199 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 200 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
201 *lp = p;
202 }
203 return p + (index & (L2_SIZE - 1));
204}
205
fd6ce8f6 206static inline PageDesc *page_find(unsigned int index)
54936004 207{
54936004
FB
208 PageDesc *p;
209
54936004
FB
210 p = l1_map[index >> L2_BITS];
211 if (!p)
212 return 0;
fd6ce8f6
FB
213 return p + (index & (L2_SIZE - 1));
214}
215
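/* Like page_find_alloc() but for the physical page table (l1_phys_map).
   When TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra indirection level is used
   so that addresses wider than 32 bits can be mapped; leaf entries default
   to IO_MEM_UNASSIGNED.  Returns NULL if 'alloc' is 0 and the page has no
   descriptor yet. */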
108c49b8 216static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 217{
108c49b8 218 void **lp, **p;
e3f4e2a4 219 PhysPageDesc *pd;
92e873b9 220
108c49b8
FB
221 p = (void **)l1_phys_map;
222#if TARGET_PHYS_ADDR_SPACE_BITS > 32
223
224#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
225#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
226#endif
227 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
228 p = *lp;
229 if (!p) {
230 /* allocate if not found */
108c49b8
FB
231 if (!alloc)
232 return NULL;
233 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
234 memset(p, 0, sizeof(void *) * L1_SIZE);
235 *lp = p;
236 }
237#endif
238 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
239 pd = *lp;
240 if (!pd) {
241 int i;
108c49b8
FB
242 /* allocate if not found */
243 if (!alloc)
244 return NULL;
e3f4e2a4
PB
245 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
246 *lp = pd;
247 for (i = 0; i < L2_SIZE; i++)
248 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 249 }
e3f4e2a4 250 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
251}
252
108c49b8 253static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 254{
108c49b8 255 return phys_page_find_alloc(index, 0);
92e873b9
FB
256}
257
9fa3e853 258#if !defined(CONFIG_USER_ONLY)
6a00d601 259static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
260static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
261 target_ulong vaddr);
9fa3e853 262#endif
fd6ce8f6 263
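/* Register a new CPU: on the first call initialize the code generation
   buffer, the page tables and the I/O memory handlers, then append 'env'
   to the global CPU list and assign it the next cpu_index. */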
6a00d601 264void cpu_exec_init(CPUState *env)
fd6ce8f6 265{
6a00d601
FB
266 CPUState **penv;
267 int cpu_index;
268
fd6ce8f6
FB
269 if (!code_gen_ptr) {
270 code_gen_ptr = code_gen_buffer;
b346ff46 271 page_init();
33417e70 272 io_mem_init();
fd6ce8f6 273 }
6a00d601
FB
274 env->next_cpu = NULL;
275 penv = &first_cpu;
276 cpu_index = 0;
277 while (*penv != NULL) {
278 penv = (CPUState **)&(*penv)->next_cpu;
279 cpu_index++;
280 }
281 env->cpu_index = cpu_index;
6658ffb8 282 env->nb_watchpoints = 0;
6a00d601 283 *penv = env;
fd6ce8f6
FB
284}
285
9fa3e853
FB
286static inline void invalidate_page_bitmap(PageDesc *p)
287{
288 if (p->code_bitmap) {
59817ccb 289 qemu_free(p->code_bitmap);
9fa3e853
FB
290 p->code_bitmap = NULL;
291 }
292 p->code_write_count = 0;
293}
294
fd6ce8f6
FB
295/* set to NULL all the 'first_tb' fields in all PageDescs */
296static void page_flush_tb(void)
297{
298 int i, j;
299 PageDesc *p;
300
301 for(i = 0; i < L1_SIZE; i++) {
302 p = l1_map[i];
303 if (p) {
9fa3e853
FB
304 for(j = 0; j < L2_SIZE; j++) {
305 p->first_tb = NULL;
306 invalidate_page_bitmap(p);
307 p++;
308 }
fd6ce8f6
FB
309 }
310 }
311}
312
313/* flush all the translation blocks */
d4e8164f 314/* XXX: tb_flush is currently not thread safe */
6a00d601 315void tb_flush(CPUState *env1)
fd6ce8f6 316{
6a00d601 317 CPUState *env;
0124311e 318#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
319 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
320 code_gen_ptr - code_gen_buffer,
321 nb_tbs,
0124311e 322 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
323#endif
324 nb_tbs = 0;
6a00d601
FB
325
326 for(env = first_cpu; env != NULL; env = env->next_cpu) {
327 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
328 }
9fa3e853 329
8a8a608f 330 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 331 page_flush_tb();
9fa3e853 332
fd6ce8f6 333 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
334 /* XXX: flush processor icache at this point if cache flush is
335 expensive */
e3db7226 336 tb_flush_count++;
fd6ce8f6
FB
337}
338
339#ifdef DEBUG_TB_CHECK
340
bc98a7ef 341static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
342{
343 TranslationBlock *tb;
344 int i;
345 address &= TARGET_PAGE_MASK;
99773bd4
PB
346 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
347 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
348 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
349 address >= tb->pc + tb->size)) {
350 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 351 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
352 }
353 }
354 }
355}
356
357/* verify that all the pages have correct rights for code */
358static void tb_page_check(void)
359{
360 TranslationBlock *tb;
361 int i, flags1, flags2;
362
99773bd4
PB
363 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
364 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
365 flags1 = page_get_flags(tb->pc);
366 flags2 = page_get_flags(tb->pc + tb->size - 1);
367 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
368 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 369 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
370 }
371 }
372 }
373}
374
d4e8164f
FB
375void tb_jmp_check(TranslationBlock *tb)
376{
377 TranslationBlock *tb1;
378 unsigned int n1;
379
380 /* suppress any remaining jumps to this TB */
381 tb1 = tb->jmp_first;
382 for(;;) {
383 n1 = (long)tb1 & 3;
384 tb1 = (TranslationBlock *)((long)tb1 & ~3);
385 if (n1 == 2)
386 break;
387 tb1 = tb1->jmp_next[n1];
388 }
389 /* check end of list */
390 if (tb1 != tb) {
391 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
392 }
393}
394
fd6ce8f6
FB
395#endif
396
397/* invalidate one TB */
398static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
399 int next_offset)
400{
401 TranslationBlock *tb1;
402 for(;;) {
403 tb1 = *ptb;
404 if (tb1 == tb) {
405 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
406 break;
407 }
408 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
409 }
410}
411
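/* TB list pointers carry a tag in their low two bits: for the per-page TB
   lists the tag (0 or 1) tells which of the TB's two pages the link refers
   to, for the jump lists it tells which of the two jump slots is used, and
   the value 2 marks the head of the circular jump list (the TB itself).
   The helpers below strip the tag with '& ~3' before dereferencing. */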
9fa3e853 412static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
413{
414 TranslationBlock *tb1;
415 unsigned int n1;
416
417 for(;;) {
418 tb1 = *ptb;
419 n1 = (long)tb1 & 3;
420 tb1 = (TranslationBlock *)((long)tb1 & ~3);
421 if (tb1 == tb) {
422 *ptb = tb1->page_next[n1];
423 break;
424 }
425 ptb = &tb1->page_next[n1];
426 }
427}
428
d4e8164f
FB
429static inline void tb_jmp_remove(TranslationBlock *tb, int n)
430{
431 TranslationBlock *tb1, **ptb;
432 unsigned int n1;
433
434 ptb = &tb->jmp_next[n];
435 tb1 = *ptb;
436 if (tb1) {
437 /* find tb(n) in circular list */
438 for(;;) {
439 tb1 = *ptb;
440 n1 = (long)tb1 & 3;
441 tb1 = (TranslationBlock *)((long)tb1 & ~3);
442 if (n1 == n && tb1 == tb)
443 break;
444 if (n1 == 2) {
445 ptb = &tb1->jmp_first;
446 } else {
447 ptb = &tb1->jmp_next[n1];
448 }
449 }
450 /* now we can suppress tb(n) from the list */
451 *ptb = tb->jmp_next[n];
452
453 tb->jmp_next[n] = NULL;
454 }
455}
456
457/* reset the jump entry 'n' of a TB so that it is not chained to
458 another TB */
459static inline void tb_reset_jump(TranslationBlock *tb, int n)
460{
461 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
462}
463
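/* Remove a TB from the physical hash table and from the per-page TB lists,
   clear it from every CPU's jump cache, then unlink all jumps to and from
   it so that it can no longer be reached. */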
8a40a180 464static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 465{
6a00d601 466 CPUState *env;
8a40a180 467 PageDesc *p;
d4e8164f 468 unsigned int h, n1;
8a40a180
FB
469 target_ulong phys_pc;
470 TranslationBlock *tb1, *tb2;
d4e8164f 471
8a40a180
FB
472 /* remove the TB from the hash list */
473 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
474 h = tb_phys_hash_func(phys_pc);
475 tb_remove(&tb_phys_hash[h], tb,
476 offsetof(TranslationBlock, phys_hash_next));
477
478 /* remove the TB from the page list */
479 if (tb->page_addr[0] != page_addr) {
480 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
481 tb_page_remove(&p->first_tb, tb);
482 invalidate_page_bitmap(p);
483 }
484 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
485 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
486 tb_page_remove(&p->first_tb, tb);
487 invalidate_page_bitmap(p);
488 }
489
36bdbe54 490 tb_invalidated_flag = 1;
59817ccb 491
fd6ce8f6 492 /* remove the TB from the hash list */
8a40a180 493 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
494 for(env = first_cpu; env != NULL; env = env->next_cpu) {
495 if (env->tb_jmp_cache[h] == tb)
496 env->tb_jmp_cache[h] = NULL;
497 }
d4e8164f
FB
498
499 /* suppress this TB from the two jump lists */
500 tb_jmp_remove(tb, 0);
501 tb_jmp_remove(tb, 1);
502
503 /* suppress any remaining jumps to this TB */
504 tb1 = tb->jmp_first;
505 for(;;) {
506 n1 = (long)tb1 & 3;
507 if (n1 == 2)
508 break;
509 tb1 = (TranslationBlock *)((long)tb1 & ~3);
510 tb2 = tb1->jmp_next[n1];
511 tb_reset_jump(tb1, n1);
512 tb1->jmp_next[n1] = NULL;
513 tb1 = tb2;
514 }
515 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 516
e3db7226 517 tb_phys_invalidate_count++;
9fa3e853
FB
518}
519
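/* Mark bits [start, start + len) in the byte array 'tab', working at bit
   granularity; used to build the per-page bitmap of byte ranges covered by
   translated code. */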
520static inline void set_bits(uint8_t *tab, int start, int len)
521{
522 int end, mask, end1;
523
524 end = start + len;
525 tab += start >> 3;
526 mask = 0xff << (start & 7);
527 if ((start & ~7) == (end & ~7)) {
528 if (start < end) {
529 mask &= ~(0xff << (end & 7));
530 *tab |= mask;
531 }
532 } else {
533 *tab++ |= mask;
534 start = (start + 8) & ~7;
535 end1 = end & ~7;
536 while (start < end1) {
537 *tab++ = 0xff;
538 start += 8;
539 }
540 if (start < end) {
541 mask = ~(0xff << (end & 7));
542 *tab |= mask;
543 }
544 }
545}
546
547static void build_page_bitmap(PageDesc *p)
548{
549 int n, tb_start, tb_end;
550 TranslationBlock *tb;
551
59817ccb 552 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
553 if (!p->code_bitmap)
554 return;
555 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
556
557 tb = p->first_tb;
558 while (tb != NULL) {
559 n = (long)tb & 3;
560 tb = (TranslationBlock *)((long)tb & ~3);
561 /* NOTE: this is subtle as a TB may span two physical pages */
562 if (n == 0) {
563 /* NOTE: tb_end may be after the end of the page, but
564 it is not a problem */
565 tb_start = tb->pc & ~TARGET_PAGE_MASK;
566 tb_end = tb_start + tb->size;
567 if (tb_end > TARGET_PAGE_SIZE)
568 tb_end = TARGET_PAGE_SIZE;
569 } else {
570 tb_start = 0;
571 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
572 }
573 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
574 tb = tb->page_next[n];
575 }
576}
577
d720b93d
FB
578#ifdef TARGET_HAS_PRECISE_SMC
579
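/* Generate a new TB for (pc, cs_base, flags).  If the TB cache is full the
   whole cache is flushed first, so the second tb_alloc() cannot fail.  The
   new TB is then linked into the physical page tables via tb_link_phys(). */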
580static void tb_gen_code(CPUState *env,
581 target_ulong pc, target_ulong cs_base, int flags,
582 int cflags)
583{
584 TranslationBlock *tb;
585 uint8_t *tc_ptr;
586 target_ulong phys_pc, phys_page2, virt_page2;
587 int code_gen_size;
588
c27004ec
FB
589 phys_pc = get_phys_addr_code(env, pc);
590 tb = tb_alloc(pc);
d720b93d
FB
591 if (!tb) {
592 /* flush must be done */
593 tb_flush(env);
594 /* cannot fail at this point */
c27004ec 595 tb = tb_alloc(pc);
d720b93d
FB
596 }
597 tc_ptr = code_gen_ptr;
598 tb->tc_ptr = tc_ptr;
599 tb->cs_base = cs_base;
600 tb->flags = flags;
601 tb->cflags = cflags;
602 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
603 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
604
605 /* check next page if needed */
c27004ec 606 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 607 phys_page2 = -1;
c27004ec 608 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
609 phys_page2 = get_phys_addr_code(env, virt_page2);
610 }
611 tb_link_phys(tb, phys_pc, phys_page2);
612}
613#endif
614
9fa3e853
FB
615/* invalidate all TBs which intersect with the target physical page
 616 starting in range [start, end). NOTE: start and end must refer to
d720b93d
FB
617 the same physical page. 'is_cpu_write_access' should be true if called
618 from a real cpu write access: the virtual CPU will exit the current
619 TB if code is modified inside this TB. */
620void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
621 int is_cpu_write_access)
622{
623 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 624 CPUState *env = cpu_single_env;
9fa3e853 625 PageDesc *p;
ea1c1802 626 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 627 target_ulong tb_start, tb_end;
d720b93d 628 target_ulong current_pc, current_cs_base;
9fa3e853
FB
629
630 p = page_find(start >> TARGET_PAGE_BITS);
631 if (!p)
632 return;
633 if (!p->code_bitmap &&
d720b93d
FB
634 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
635 is_cpu_write_access) {
9fa3e853
FB
636 /* build code bitmap */
637 build_page_bitmap(p);
638 }
639
640 /* we remove all the TBs in the range [start, end[ */
641 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
642 current_tb_not_found = is_cpu_write_access;
643 current_tb_modified = 0;
644 current_tb = NULL; /* avoid warning */
645 current_pc = 0; /* avoid warning */
646 current_cs_base = 0; /* avoid warning */
647 current_flags = 0; /* avoid warning */
9fa3e853
FB
648 tb = p->first_tb;
649 while (tb != NULL) {
650 n = (long)tb & 3;
651 tb = (TranslationBlock *)((long)tb & ~3);
652 tb_next = tb->page_next[n];
653 /* NOTE: this is subtle as a TB may span two physical pages */
654 if (n == 0) {
655 /* NOTE: tb_end may be after the end of the page, but
656 it is not a problem */
657 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
658 tb_end = tb_start + tb->size;
659 } else {
660 tb_start = tb->page_addr[1];
661 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
662 }
663 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
664#ifdef TARGET_HAS_PRECISE_SMC
665 if (current_tb_not_found) {
666 current_tb_not_found = 0;
667 current_tb = NULL;
668 if (env->mem_write_pc) {
669 /* now we have a real cpu fault */
670 current_tb = tb_find_pc(env->mem_write_pc);
671 }
672 }
673 if (current_tb == tb &&
674 !(current_tb->cflags & CF_SINGLE_INSN)) {
675 /* If we are modifying the current TB, we must stop
676 its execution. We could be more precise by checking
677 that the modification is after the current PC, but it
678 would require a specialized function to partially
679 restore the CPU state */
680
681 current_tb_modified = 1;
682 cpu_restore_state(current_tb, env,
683 env->mem_write_pc, NULL);
684#if defined(TARGET_I386)
685 current_flags = env->hflags;
686 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
687 current_cs_base = (target_ulong)env->segs[R_CS].base;
688 current_pc = current_cs_base + env->eip;
689#else
690#error unsupported CPU
691#endif
692 }
693#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
694 /* we need to do that to handle the case where a signal
695 occurs while doing tb_phys_invalidate() */
696 saved_tb = NULL;
697 if (env) {
698 saved_tb = env->current_tb;
699 env->current_tb = NULL;
700 }
9fa3e853 701 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
702 if (env) {
703 env->current_tb = saved_tb;
704 if (env->interrupt_request && env->current_tb)
705 cpu_interrupt(env, env->interrupt_request);
706 }
9fa3e853
FB
707 }
708 tb = tb_next;
709 }
710#if !defined(CONFIG_USER_ONLY)
711 /* if no code remaining, no need to continue to use slow writes */
712 if (!p->first_tb) {
713 invalidate_page_bitmap(p);
d720b93d
FB
714 if (is_cpu_write_access) {
715 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
716 }
717 }
718#endif
719#ifdef TARGET_HAS_PRECISE_SMC
720 if (current_tb_modified) {
721 /* we generate a block containing just the instruction
722 modifying the memory. It will ensure that it cannot modify
723 itself */
ea1c1802 724 env->current_tb = NULL;
d720b93d
FB
725 tb_gen_code(env, current_pc, current_cs_base, current_flags,
726 CF_SINGLE_INSN);
727 cpu_resume_from_signal(env, NULL);
9fa3e853 728 }
fd6ce8f6 729#endif
9fa3e853 730}
fd6ce8f6 731
9fa3e853 732/* len must be <= 8 and start must be a multiple of len */
d720b93d 733static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
734{
735 PageDesc *p;
736 int offset, b;
59817ccb 737#if 0
a4193c8a
FB
738 if (1) {
739 if (loglevel) {
740 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
741 cpu_single_env->mem_write_vaddr, len,
742 cpu_single_env->eip,
743 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
744 }
59817ccb
FB
745 }
746#endif
9fa3e853
FB
747 p = page_find(start >> TARGET_PAGE_BITS);
748 if (!p)
749 return;
750 if (p->code_bitmap) {
751 offset = start & ~TARGET_PAGE_MASK;
752 b = p->code_bitmap[offset >> 3] >> (offset & 7);
753 if (b & ((1 << len) - 1))
754 goto do_invalidate;
755 } else {
756 do_invalidate:
d720b93d 757 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
758 }
759}
760
9fa3e853 761#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
762static void tb_invalidate_phys_page(target_ulong addr,
763 unsigned long pc, void *puc)
9fa3e853 764{
d720b93d
FB
765 int n, current_flags, current_tb_modified;
766 target_ulong current_pc, current_cs_base;
9fa3e853 767 PageDesc *p;
d720b93d
FB
768 TranslationBlock *tb, *current_tb;
769#ifdef TARGET_HAS_PRECISE_SMC
770 CPUState *env = cpu_single_env;
771#endif
9fa3e853
FB
772
773 addr &= TARGET_PAGE_MASK;
774 p = page_find(addr >> TARGET_PAGE_BITS);
775 if (!p)
776 return;
777 tb = p->first_tb;
d720b93d
FB
778 current_tb_modified = 0;
779 current_tb = NULL;
780 current_pc = 0; /* avoid warning */
781 current_cs_base = 0; /* avoid warning */
782 current_flags = 0; /* avoid warning */
783#ifdef TARGET_HAS_PRECISE_SMC
784 if (tb && pc != 0) {
785 current_tb = tb_find_pc(pc);
786 }
787#endif
9fa3e853
FB
788 while (tb != NULL) {
789 n = (long)tb & 3;
790 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
791#ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb == tb &&
793 !(current_tb->cflags & CF_SINGLE_INSN)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
799
800 current_tb_modified = 1;
801 cpu_restore_state(current_tb, env, pc, puc);
802#if defined(TARGET_I386)
803 current_flags = env->hflags;
804 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
805 current_cs_base = (target_ulong)env->segs[R_CS].base;
806 current_pc = current_cs_base + env->eip;
807#else
808#error unsupported CPU
809#endif
810 }
811#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
812 tb_phys_invalidate(tb, addr);
813 tb = tb->page_next[n];
814 }
fd6ce8f6 815 p->first_tb = NULL;
d720b93d
FB
816#ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_modified) {
818 /* we generate a block containing just the instruction
819 modifying the memory. It will ensure that it cannot modify
820 itself */
ea1c1802 821 env->current_tb = NULL;
d720b93d
FB
822 tb_gen_code(env, current_pc, current_cs_base, current_flags,
823 CF_SINGLE_INSN);
824 cpu_resume_from_signal(env, puc);
825 }
826#endif
fd6ce8f6 827}
9fa3e853 828#endif
fd6ce8f6
FB
829
830/* add the tb in the target page and protect it if necessary */
9fa3e853 831static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 832 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
833{
834 PageDesc *p;
9fa3e853
FB
835 TranslationBlock *last_first_tb;
836
837 tb->page_addr[n] = page_addr;
3a7d929e 838 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
839 tb->page_next[n] = p->first_tb;
840 last_first_tb = p->first_tb;
841 p->first_tb = (TranslationBlock *)((long)tb | n);
842 invalidate_page_bitmap(p);
fd6ce8f6 843
107db443 844#if defined(TARGET_HAS_SMC) || 1
d720b93d 845
9fa3e853 846#if defined(CONFIG_USER_ONLY)
fd6ce8f6 847 if (p->flags & PAGE_WRITE) {
53a5960a
PB
848 target_ulong addr;
849 PageDesc *p2;
9fa3e853
FB
850 int prot;
851
fd6ce8f6
FB
852 /* force the host page as non writable (writes will have a
853 page fault + mprotect overhead) */
53a5960a 854 page_addr &= qemu_host_page_mask;
fd6ce8f6 855 prot = 0;
53a5960a
PB
856 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
857 addr += TARGET_PAGE_SIZE) {
858
859 p2 = page_find (addr >> TARGET_PAGE_BITS);
860 if (!p2)
861 continue;
862 prot |= p2->flags;
863 p2->flags &= ~PAGE_WRITE;
864 page_get_flags(addr);
865 }
866 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
867 (prot & PAGE_BITS) & ~PAGE_WRITE);
868#ifdef DEBUG_TB_INVALIDATE
869 printf("protecting code page: 0x%08lx\n",
53a5960a 870 page_addr);
fd6ce8f6 871#endif
fd6ce8f6 872 }
9fa3e853
FB
873#else
874 /* if some code is already present, then the pages are already
875 protected. So we handle the case where only the first TB is
876 allocated in a physical page */
877 if (!last_first_tb) {
6a00d601 878 tlb_protect_code(page_addr);
9fa3e853
FB
879 }
880#endif
d720b93d
FB
881
882#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
883}
884
885/* Allocate a new translation block. Flush the translation buffer if
886 too many translation blocks or too much generated code. */
c27004ec 887TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
888{
889 TranslationBlock *tb;
fd6ce8f6
FB
890
891 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
892 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 893 return NULL;
fd6ce8f6
FB
894 tb = &tbs[nb_tbs++];
895 tb->pc = pc;
b448f2f3 896 tb->cflags = 0;
d4e8164f
FB
897 return tb;
898}
899
9fa3e853
FB
900/* add a new TB and link it to the physical page tables. phys_page2 is
901 (-1) to indicate that only one page contains the TB. */
902void tb_link_phys(TranslationBlock *tb,
903 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 904{
9fa3e853
FB
905 unsigned int h;
906 TranslationBlock **ptb;
907
908 /* add in the physical hash table */
909 h = tb_phys_hash_func(phys_pc);
910 ptb = &tb_phys_hash[h];
911 tb->phys_hash_next = *ptb;
912 *ptb = tb;
fd6ce8f6
FB
913
914 /* add in the page list */
9fa3e853
FB
915 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
916 if (phys_page2 != -1)
917 tb_alloc_page(tb, 1, phys_page2);
918 else
919 tb->page_addr[1] = -1;
9fa3e853 920
d4e8164f
FB
921 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
922 tb->jmp_next[0] = NULL;
923 tb->jmp_next[1] = NULL;
b448f2f3
FB
924#ifdef USE_CODE_COPY
925 tb->cflags &= ~CF_FP_USED;
926 if (tb->cflags & CF_TB_FP_USED)
927 tb->cflags |= CF_FP_USED;
928#endif
d4e8164f
FB
929
930 /* init original jump addresses */
931 if (tb->tb_next_offset[0] != 0xffff)
932 tb_reset_jump(tb, 0);
933 if (tb->tb_next_offset[1] != 0xffff)
934 tb_reset_jump(tb, 1);
8a40a180
FB
935
936#ifdef DEBUG_TB_CHECK
937 tb_page_check();
938#endif
fd6ce8f6
FB
939}
940
9fa3e853
FB
941/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
942 tb[1].tc_ptr. Return NULL if not found */
943TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 944{
9fa3e853
FB
945 int m_min, m_max, m;
946 unsigned long v;
947 TranslationBlock *tb;
a513fe19
FB
948
949 if (nb_tbs <= 0)
950 return NULL;
951 if (tc_ptr < (unsigned long)code_gen_buffer ||
952 tc_ptr >= (unsigned long)code_gen_ptr)
953 return NULL;
954 /* binary search (cf Knuth) */
955 m_min = 0;
956 m_max = nb_tbs - 1;
957 while (m_min <= m_max) {
958 m = (m_min + m_max) >> 1;
959 tb = &tbs[m];
960 v = (unsigned long)tb->tc_ptr;
961 if (v == tc_ptr)
962 return tb;
963 else if (tc_ptr < v) {
964 m_max = m - 1;
965 } else {
966 m_min = m + 1;
967 }
968 }
969 return &tbs[m_max];
970}
7501267e 971
ea041c0e
FB
972static void tb_reset_jump_recursive(TranslationBlock *tb);
973
974static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
975{
976 TranslationBlock *tb1, *tb_next, **ptb;
977 unsigned int n1;
978
979 tb1 = tb->jmp_next[n];
980 if (tb1 != NULL) {
981 /* find head of list */
982 for(;;) {
983 n1 = (long)tb1 & 3;
984 tb1 = (TranslationBlock *)((long)tb1 & ~3);
985 if (n1 == 2)
986 break;
987 tb1 = tb1->jmp_next[n1];
988 }
 989 /* we are now sure that tb jumps to tb1 */
990 tb_next = tb1;
991
992 /* remove tb from the jmp_first list */
993 ptb = &tb_next->jmp_first;
994 for(;;) {
995 tb1 = *ptb;
996 n1 = (long)tb1 & 3;
997 tb1 = (TranslationBlock *)((long)tb1 & ~3);
998 if (n1 == n && tb1 == tb)
999 break;
1000 ptb = &tb1->jmp_next[n1];
1001 }
1002 *ptb = tb->jmp_next[n];
1003 tb->jmp_next[n] = NULL;
1004
1005 /* suppress the jump to next tb in generated code */
1006 tb_reset_jump(tb, n);
1007
0124311e 1008 /* suppress chained jumps in the tb we could have jumped to */
ea041c0e
FB
1009 tb_reset_jump_recursive(tb_next);
1010 }
1011}
1012
1013static void tb_reset_jump_recursive(TranslationBlock *tb)
1014{
1015 tb_reset_jump_recursive2(tb, 0);
1016 tb_reset_jump_recursive2(tb, 1);
1017}
1018
1fddef4b 1019#if defined(TARGET_HAS_ICE)
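/* Flush any TB that contains the breakpoint address: translate pc to a ram
   address through the physical page table and invalidate that byte range. */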
d720b93d 1020static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1021{
c2f07f81
PB
1022 target_ulong addr, pd;
1023 ram_addr_t ram_addr;
1024 PhysPageDesc *p;
d720b93d 1025
c2f07f81
PB
1026 addr = cpu_get_phys_page_debug(env, pc);
1027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1028 if (!p) {
1029 pd = IO_MEM_UNASSIGNED;
1030 } else {
1031 pd = p->phys_offset;
1032 }
1033 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1034 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1035}
c27004ec 1036#endif
d720b93d 1037
6658ffb8
PB
1038/* Add a watchpoint. */
1039int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1040{
1041 int i;
1042
1043 for (i = 0; i < env->nb_watchpoints; i++) {
1044 if (addr == env->watchpoint[i].vaddr)
1045 return 0;
1046 }
1047 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1048 return -1;
1049
1050 i = env->nb_watchpoints++;
1051 env->watchpoint[i].vaddr = addr;
1052 tlb_flush_page(env, addr);
1053 /* FIXME: This flush is needed because of the hack to make memory ops
1054 terminate the TB. It can be removed once the proper IO trap and
1055 re-execute bits are in. */
1056 tb_flush(env);
1057 return i;
1058}
1059
1060/* Remove a watchpoint. */
1061int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1062{
1063 int i;
1064
1065 for (i = 0; i < env->nb_watchpoints; i++) {
1066 if (addr == env->watchpoint[i].vaddr) {
1067 env->nb_watchpoints--;
1068 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1069 tlb_flush_page(env, addr);
1070 return 0;
1071 }
1072 }
1073 return -1;
1074}
1075
c33a346e
FB
1076/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1077 breakpoint is reached */
2e12669a 1078int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1079{
1fddef4b 1080#if defined(TARGET_HAS_ICE)
4c3a88a2 1081 int i;
d720b93d 1082
4c3a88a2
FB
1083 for(i = 0; i < env->nb_breakpoints; i++) {
1084 if (env->breakpoints[i] == pc)
1085 return 0;
1086 }
1087
1088 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1089 return -1;
1090 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1091
1092 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1093 return 0;
1094#else
1095 return -1;
1096#endif
1097}
1098
1099/* remove a breakpoint */
2e12669a 1100int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1101{
1fddef4b 1102#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1103 int i;
1104 for(i = 0; i < env->nb_breakpoints; i++) {
1105 if (env->breakpoints[i] == pc)
1106 goto found;
1107 }
1108 return -1;
1109 found:
4c3a88a2 1110 env->nb_breakpoints--;
1fddef4b
FB
1111 if (i < env->nb_breakpoints)
1112 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1113
1114 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1115 return 0;
1116#else
1117 return -1;
1118#endif
1119}
1120
c33a346e
FB
1121/* enable or disable single step mode. EXCP_DEBUG is returned by the
1122 CPU loop after each instruction */
1123void cpu_single_step(CPUState *env, int enabled)
1124{
1fddef4b 1125#if defined(TARGET_HAS_ICE)
c33a346e
FB
1126 if (env->singlestep_enabled != enabled) {
1127 env->singlestep_enabled = enabled;
 1128 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1129 /* XXX: only flush what is necessary */
0124311e 1130 tb_flush(env);
c33a346e
FB
1131 }
1132#endif
1133}
1134
34865134
FB
1135/* enable or disable low levels log */
1136void cpu_set_log(int log_flags)
1137{
1138 loglevel = log_flags;
1139 if (loglevel && !logfile) {
1140 logfile = fopen(logfilename, "w");
1141 if (!logfile) {
1142 perror(logfilename);
1143 _exit(1);
1144 }
9fa3e853
FB
1145#if !defined(CONFIG_SOFTMMU)
1146 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1147 {
1148 static uint8_t logfile_buf[4096];
1149 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1150 }
1151#else
34865134 1152 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1153#endif
34865134
FB
1154 }
1155}
1156
1157void cpu_set_log_filename(const char *filename)
1158{
1159 logfilename = strdup(filename);
1160}
c33a346e 1161
0124311e 1162/* mask must never be zero, except for A20 change call */
68a79315 1163void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1164{
1165 TranslationBlock *tb;
ee8b7021 1166 static int interrupt_lock;
59817ccb 1167
68a79315 1168 env->interrupt_request |= mask;
ea041c0e
FB
1169 /* if the cpu is currently executing code, we must unlink it and
1170 all the potentially executing TB */
1171 tb = env->current_tb;
ee8b7021
FB
1172 if (tb && !testandset(&interrupt_lock)) {
1173 env->current_tb = NULL;
ea041c0e 1174 tb_reset_jump_recursive(tb);
ee8b7021 1175 interrupt_lock = 0;
ea041c0e
FB
1176 }
1177}
1178
b54ad049
FB
1179void cpu_reset_interrupt(CPUState *env, int mask)
1180{
1181 env->interrupt_request &= ~mask;
1182}
1183
f193c797
FB
1184CPULogItem cpu_log_items[] = {
1185 { CPU_LOG_TB_OUT_ASM, "out_asm",
1186 "show generated host assembly code for each compiled TB" },
1187 { CPU_LOG_TB_IN_ASM, "in_asm",
1188 "show target assembly code for each compiled TB" },
1189 { CPU_LOG_TB_OP, "op",
1190 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1191#ifdef TARGET_I386
1192 { CPU_LOG_TB_OP_OPT, "op_opt",
1193 "show micro ops after optimization for each compiled TB" },
1194#endif
1195 { CPU_LOG_INT, "int",
1196 "show interrupts/exceptions in short format" },
1197 { CPU_LOG_EXEC, "exec",
1198 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1199 { CPU_LOG_TB_CPU, "cpu",
1200 "show CPU state before bloc translation" },
f193c797
FB
1201#ifdef TARGET_I386
1202 { CPU_LOG_PCALL, "pcall",
1203 "show protected mode far calls/returns/exceptions" },
1204#endif
8e3a9fd2 1205#ifdef DEBUG_IOPORT
fd872598
FB
1206 { CPU_LOG_IOPORT, "ioport",
1207 "show all i/o ports accesses" },
8e3a9fd2 1208#endif
f193c797
FB
1209 { 0, NULL, NULL },
1210};
1211
1212static int cmp1(const char *s1, int n, const char *s2)
1213{
1214 if (strlen(s2) != n)
1215 return 0;
1216 return memcmp(s1, s2, n) == 0;
1217}
1218
1219/* takes a comma separated list of log masks. Return 0 if error. */
1220int cpu_str_to_log_mask(const char *str)
1221{
1222 CPULogItem *item;
1223 int mask;
1224 const char *p, *p1;
1225
1226 p = str;
1227 mask = 0;
1228 for(;;) {
1229 p1 = strchr(p, ',');
1230 if (!p1)
1231 p1 = p + strlen(p);
8e3a9fd2
FB
1232 if(cmp1(p,p1-p,"all")) {
1233 for(item = cpu_log_items; item->mask != 0; item++) {
1234 mask |= item->mask;
1235 }
1236 } else {
f193c797
FB
1237 for(item = cpu_log_items; item->mask != 0; item++) {
1238 if (cmp1(p, p1 - p, item->name))
1239 goto found;
1240 }
1241 return 0;
8e3a9fd2 1242 }
f193c797
FB
1243 found:
1244 mask |= item->mask;
1245 if (*p1 != ',')
1246 break;
1247 p = p1 + 1;
1248 }
1249 return mask;
1250}
ea041c0e 1251
7501267e
FB
1252void cpu_abort(CPUState *env, const char *fmt, ...)
1253{
1254 va_list ap;
1255
1256 va_start(ap, fmt);
1257 fprintf(stderr, "qemu: fatal: ");
1258 vfprintf(stderr, fmt, ap);
1259 fprintf(stderr, "\n");
1260#ifdef TARGET_I386
7fe48483
FB
1261 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1262#else
1263 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1264#endif
1265 va_end(ap);
1266 abort();
1267}
1268
c5be9f08
TS
1269CPUState *cpu_copy(CPUState *env)
1270{
1271 CPUState *new_env = cpu_init();
1272 /* preserve chaining and index */
1273 CPUState *next_cpu = new_env->next_cpu;
1274 int cpu_index = new_env->cpu_index;
1275 memcpy(new_env, env, sizeof(CPUState));
1276 new_env->next_cpu = next_cpu;
1277 new_env->cpu_index = cpu_index;
1278 return new_env;
1279}
1280
0124311e
FB
1281#if !defined(CONFIG_USER_ONLY)
1282
ee8b7021
FB
1283/* NOTE: if flush_global is true, also flush global entries (not
1284 implemented yet) */
1285void tlb_flush(CPUState *env, int flush_global)
33417e70 1286{
33417e70 1287 int i;
0124311e 1288
9fa3e853
FB
1289#if defined(DEBUG_TLB)
1290 printf("tlb_flush:\n");
1291#endif
0124311e
FB
1292 /* must reset current TB so that interrupts cannot modify the
1293 links while we are modifying them */
1294 env->current_tb = NULL;
1295
33417e70 1296 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1297 env->tlb_table[0][i].addr_read = -1;
1298 env->tlb_table[0][i].addr_write = -1;
1299 env->tlb_table[0][i].addr_code = -1;
1300 env->tlb_table[1][i].addr_read = -1;
1301 env->tlb_table[1][i].addr_write = -1;
1302 env->tlb_table[1][i].addr_code = -1;
33417e70 1303 }
9fa3e853 1304
8a40a180 1305 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1306
1307#if !defined(CONFIG_SOFTMMU)
1308 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1309#endif
1310#ifdef USE_KQEMU
1311 if (env->kqemu_enabled) {
1312 kqemu_flush(env, flush_global);
1313 }
9fa3e853 1314#endif
e3db7226 1315 tlb_flush_count++;
33417e70
FB
1316}
1317
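/* Invalidate a single TLB entry if any of its read, write or code addresses
   matches the given page-aligned virtual address. */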
274da6b2 1318static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1319{
84b7b8e7
FB
1320 if (addr == (tlb_entry->addr_read &
1321 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1322 addr == (tlb_entry->addr_write &
1323 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1324 addr == (tlb_entry->addr_code &
1325 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1326 tlb_entry->addr_read = -1;
1327 tlb_entry->addr_write = -1;
1328 tlb_entry->addr_code = -1;
1329 }
61382a50
FB
1330}
1331
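/* Flush the TLB entries and jump-cache entries that may map the given
   virtual address, in both the user and kernel TLB tables. */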
2e12669a 1332void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1333{
8a40a180 1334 int i;
9fa3e853 1335 TranslationBlock *tb;
0124311e 1336
9fa3e853 1337#if defined(DEBUG_TLB)
108c49b8 1338 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1339#endif
0124311e
FB
1340 /* must reset current TB so that interrupts cannot modify the
1341 links while we are modifying them */
1342 env->current_tb = NULL;
61382a50
FB
1343
1344 addr &= TARGET_PAGE_MASK;
1345 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1346 tlb_flush_entry(&env->tlb_table[0][i], addr);
1347 tlb_flush_entry(&env->tlb_table[1][i], addr);
0124311e 1348
b362e5e0
PB
1349 /* Discard jump cache entries for any tb which might potentially
1350 overlap the flushed page. */
1351 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1352 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1353
1354 i = tb_jmp_cache_hash_page(addr);
1355 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1356
0124311e 1357#if !defined(CONFIG_SOFTMMU)
9fa3e853 1358 if (addr < MMAP_AREA_END)
0124311e 1359 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1360#endif
0a962c02
FB
1361#ifdef USE_KQEMU
1362 if (env->kqemu_enabled) {
1363 kqemu_flush_page(env, addr);
1364 }
1365#endif
9fa3e853
FB
1366}
1367
9fa3e853
FB
1368/* update the TLBs so that writes to code in the virtual page 'addr'
1369 can be detected */
6a00d601 1370static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1371{
6a00d601
FB
1372 cpu_physical_memory_reset_dirty(ram_addr,
1373 ram_addr + TARGET_PAGE_SIZE,
1374 CODE_DIRTY_FLAG);
9fa3e853
FB
1375}
1376
9fa3e853 1377/* update the TLB so that writes in physical page 'ram_addr' are no longer
3a7d929e 1378 tested for self-modifying code */
1379static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1380 target_ulong vaddr)
9fa3e853 1381{
3a7d929e 1382 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1383}
1384
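/* If the entry maps RAM inside [start, start + length), switch its write
   address to IO_MEM_NOTDIRTY so that the next store goes through the slow
   path and sets the dirty bits again. */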
1385static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1386 unsigned long start, unsigned long length)
1387{
1388 unsigned long addr;
84b7b8e7
FB
1389 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1390 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1391 if ((addr - start) < length) {
84b7b8e7 1392 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1393 }
1394 }
1395}
1396
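/* Clear the given dirty flag bits for the RAM range [start, end) and make
   every CPU's TLB direct writes to that range through the NOTDIRTY slow
   path so that the flags are set again on the next store. */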
3a7d929e 1397void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1398 int dirty_flags)
1ccde1cb
FB
1399{
1400 CPUState *env;
4f2ac237 1401 unsigned long length, start1;
0a962c02
FB
1402 int i, mask, len;
1403 uint8_t *p;
1ccde1cb
FB
1404
1405 start &= TARGET_PAGE_MASK;
1406 end = TARGET_PAGE_ALIGN(end);
1407
1408 length = end - start;
1409 if (length == 0)
1410 return;
0a962c02 1411 len = length >> TARGET_PAGE_BITS;
3a7d929e 1412#ifdef USE_KQEMU
6a00d601
FB
1413 /* XXX: should not depend on cpu context */
1414 env = first_cpu;
3a7d929e 1415 if (env->kqemu_enabled) {
f23db169
FB
1416 ram_addr_t addr;
1417 addr = start;
1418 for(i = 0; i < len; i++) {
1419 kqemu_set_notdirty(env, addr);
1420 addr += TARGET_PAGE_SIZE;
1421 }
3a7d929e
FB
1422 }
1423#endif
f23db169
FB
1424 mask = ~dirty_flags;
1425 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1426 for(i = 0; i < len; i++)
1427 p[i] &= mask;
1428
1ccde1cb
FB
1429 /* we modify the TLB cache so that the dirty bit will be set again
1430 when accessing the range */
59817ccb 1431 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1432 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1433 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1434 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1435 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1436 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1437 }
59817ccb
FB
1438
1439#if !defined(CONFIG_SOFTMMU)
1440 /* XXX: this is expensive */
1441 {
1442 VirtPageDesc *p;
1443 int j;
1444 target_ulong addr;
1445
1446 for(i = 0; i < L1_SIZE; i++) {
1447 p = l1_virt_map[i];
1448 if (p) {
1449 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1450 for(j = 0; j < L2_SIZE; j++) {
1451 if (p->valid_tag == virt_valid_tag &&
1452 p->phys_addr >= start && p->phys_addr < end &&
1453 (p->prot & PROT_WRITE)) {
1454 if (addr < MMAP_AREA_END) {
1455 mprotect((void *)addr, TARGET_PAGE_SIZE,
1456 p->prot & ~PROT_WRITE);
1457 }
1458 }
1459 addr += TARGET_PAGE_SIZE;
1460 p++;
1461 }
1462 }
1463 }
1464 }
1465#endif
1ccde1cb
FB
1466}
1467
3a7d929e
FB
1468static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1469{
1470 ram_addr_t ram_addr;
1471
84b7b8e7
FB
1472 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1473 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1474 tlb_entry->addend - (unsigned long)phys_ram_base;
1475 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1476 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1477 }
1478 }
1479}
1480
1481/* update the TLB according to the current state of the dirty bits */
1482void cpu_tlb_update_dirty(CPUState *env)
1483{
1484 int i;
1485 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1486 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1487 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1488 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1489}
1490
1ccde1cb 1491static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1492 unsigned long start)
1ccde1cb
FB
1493{
1494 unsigned long addr;
84b7b8e7
FB
1495 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1496 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1497 if (addr == start) {
84b7b8e7 1498 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1499 }
1500 }
1501}
1502
1503/* update the TLB corresponding to virtual page vaddr and phys addr
1504 addr so that it is no longer dirty */
6a00d601
FB
1505static inline void tlb_set_dirty(CPUState *env,
1506 unsigned long addr, target_ulong vaddr)
1ccde1cb 1507{
1ccde1cb
FB
1508 int i;
1509
1ccde1cb
FB
1510 addr &= TARGET_PAGE_MASK;
1511 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1512 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1513 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1514}
1515
59817ccb
FB
1516/* add a new TLB entry. At most one entry for a given virtual address
1517 is permitted. Return 0 if OK or 2 if the page could not be mapped
 1518 (can only happen in non-SOFTMMU mode for I/O pages or pages
1519 conflicting with the host address space). */
84b7b8e7
FB
1520int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1521 target_phys_addr_t paddr, int prot,
1522 int is_user, int is_softmmu)
9fa3e853 1523{
92e873b9 1524 PhysPageDesc *p;
4f2ac237 1525 unsigned long pd;
9fa3e853 1526 unsigned int index;
4f2ac237 1527 target_ulong address;
108c49b8 1528 target_phys_addr_t addend;
9fa3e853 1529 int ret;
84b7b8e7 1530 CPUTLBEntry *te;
6658ffb8 1531 int i;
9fa3e853 1532
92e873b9 1533 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1534 if (!p) {
1535 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1536 } else {
1537 pd = p->phys_offset;
9fa3e853
FB
1538 }
1539#if defined(DEBUG_TLB)
3a7d929e 1540 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1541 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1542#endif
1543
1544 ret = 0;
1545#if !defined(CONFIG_SOFTMMU)
1546 if (is_softmmu)
1547#endif
1548 {
2a4188a3 1549 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1550 /* IO memory case */
1551 address = vaddr | pd;
1552 addend = paddr;
1553 } else {
1554 /* standard memory */
1555 address = vaddr;
1556 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1557 }
6658ffb8
PB
1558
1559 /* Make accesses to pages with watchpoints go via the
1560 watchpoint trap routines. */
1561 for (i = 0; i < env->nb_watchpoints; i++) {
1562 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1563 if (address & ~TARGET_PAGE_MASK) {
1564 env->watchpoint[i].is_ram = 0;
1565 address = vaddr | io_mem_watch;
1566 } else {
1567 env->watchpoint[i].is_ram = 1;
1568 /* TODO: Figure out how to make read watchpoints coexist
1569 with code. */
1570 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1571 }
1572 }
1573 }
9fa3e853 1574
90f18422 1575 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1576 addend -= vaddr;
84b7b8e7
FB
1577 te = &env->tlb_table[is_user][index];
1578 te->addend = addend;
67b915a5 1579 if (prot & PAGE_READ) {
84b7b8e7
FB
1580 te->addr_read = address;
1581 } else {
1582 te->addr_read = -1;
1583 }
1584 if (prot & PAGE_EXEC) {
1585 te->addr_code = address;
9fa3e853 1586 } else {
84b7b8e7 1587 te->addr_code = -1;
9fa3e853 1588 }
67b915a5 1589 if (prot & PAGE_WRITE) {
856074ec
FB
1590 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1591 (pd & IO_MEM_ROMD)) {
1592 /* write access calls the I/O callback */
1593 te->addr_write = vaddr |
1594 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1595 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1596 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1597 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1598 } else {
84b7b8e7 1599 te->addr_write = address;
9fa3e853
FB
1600 }
1601 } else {
84b7b8e7 1602 te->addr_write = -1;
9fa3e853
FB
1603 }
1604 }
1605#if !defined(CONFIG_SOFTMMU)
1606 else {
1607 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1608 /* IO access: no mapping is done as it will be handled by the
1609 soft MMU */
1610 if (!(env->hflags & HF_SOFTMMU_MASK))
1611 ret = 2;
1612 } else {
1613 void *map_addr;
59817ccb
FB
1614
1615 if (vaddr >= MMAP_AREA_END) {
1616 ret = 2;
1617 } else {
1618 if (prot & PROT_WRITE) {
1619 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1620#if defined(TARGET_HAS_SMC) || 1
59817ccb 1621 first_tb ||
d720b93d 1622#endif
59817ccb
FB
1623 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1624 !cpu_physical_memory_is_dirty(pd))) {
 1625 /* ROM: we act as if code was inside */
1626 /* if code is present, we only map as read only and save the
1627 original mapping */
1628 VirtPageDesc *vp;
1629
90f18422 1630 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1631 vp->phys_addr = pd;
1632 vp->prot = prot;
1633 vp->valid_tag = virt_valid_tag;
1634 prot &= ~PAGE_WRITE;
1635 }
1636 }
1637 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1638 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1639 if (map_addr == MAP_FAILED) {
 1640 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1641 paddr, vaddr);
9fa3e853 1642 }
9fa3e853
FB
1643 }
1644 }
1645 }
1646#endif
1647 return ret;
1648}
1649
1650/* called from signal handler: invalidate the code and unprotect the
 1651 page. Return TRUE if the fault was successfully handled. */
53a5960a 1652int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1653{
1654#if !defined(CONFIG_SOFTMMU)
1655 VirtPageDesc *vp;
1656
1657#if defined(DEBUG_TLB)
1658 printf("page_unprotect: addr=0x%08x\n", addr);
1659#endif
1660 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1661
1662 /* if it is not mapped, no need to worry here */
1663 if (addr >= MMAP_AREA_END)
1664 return 0;
9fa3e853
FB
1665 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1666 if (!vp)
1667 return 0;
1668 /* NOTE: in this case, validate_tag is _not_ tested as it
1669 validates only the code TLB */
1670 if (vp->valid_tag != virt_valid_tag)
1671 return 0;
1672 if (!(vp->prot & PAGE_WRITE))
1673 return 0;
1674#if defined(DEBUG_TLB)
1675 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1676 addr, vp->phys_addr, vp->prot);
1677#endif
59817ccb
FB
1678 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1679 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1680 (unsigned long)addr, vp->prot);
d720b93d 1681 /* set the dirty bit */
0a962c02 1682 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1683 /* flush the code inside */
1684 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1685 return 1;
1686#else
1687 return 0;
1688#endif
33417e70
FB
1689}
1690
0124311e
FB
1691#else
1692
ee8b7021 1693void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1694{
1695}
1696
2e12669a 1697void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1698{
1699}
1700
84b7b8e7
FB
1701int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1702 target_phys_addr_t paddr, int prot,
1703 int is_user, int is_softmmu)
9fa3e853
FB
1704{
1705 return 0;
1706}
0124311e 1707
9fa3e853
FB
1708/* dump memory mappings */
1709void page_dump(FILE *f)
33417e70 1710{
9fa3e853
FB
1711 unsigned long start, end;
1712 int i, j, prot, prot1;
1713 PageDesc *p;
33417e70 1714
9fa3e853
FB
1715 fprintf(f, "%-8s %-8s %-8s %s\n",
1716 "start", "end", "size", "prot");
1717 start = -1;
1718 end = -1;
1719 prot = 0;
1720 for(i = 0; i <= L1_SIZE; i++) {
1721 if (i < L1_SIZE)
1722 p = l1_map[i];
1723 else
1724 p = NULL;
1725 for(j = 0;j < L2_SIZE; j++) {
1726 if (!p)
1727 prot1 = 0;
1728 else
1729 prot1 = p[j].flags;
1730 if (prot1 != prot) {
1731 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1732 if (start != -1) {
1733 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1734 start, end, end - start,
1735 prot & PAGE_READ ? 'r' : '-',
1736 prot & PAGE_WRITE ? 'w' : '-',
1737 prot & PAGE_EXEC ? 'x' : '-');
1738 }
1739 if (prot1 != 0)
1740 start = end;
1741 else
1742 start = -1;
1743 prot = prot1;
1744 }
1745 if (!p)
1746 break;
1747 }
33417e70 1748 }
33417e70
FB
1749}
1750
53a5960a 1751int page_get_flags(target_ulong address)
33417e70 1752{
9fa3e853
FB
1753 PageDesc *p;
1754
1755 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1756 if (!p)
9fa3e853
FB
1757 return 0;
1758 return p->flags;
1759}
1760
1761/* modify the flags of a page and invalidate the code if
 1762 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1763 depending on PAGE_WRITE */
53a5960a 1764void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1765{
1766 PageDesc *p;
53a5960a 1767 target_ulong addr;
9fa3e853
FB
1768
1769 start = start & TARGET_PAGE_MASK;
1770 end = TARGET_PAGE_ALIGN(end);
1771 if (flags & PAGE_WRITE)
1772 flags |= PAGE_WRITE_ORG;
1773 spin_lock(&tb_lock);
1774 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1775 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1776 /* if the write protection is set, then we invalidate the code
1777 inside */
1778 if (!(p->flags & PAGE_WRITE) &&
1779 (flags & PAGE_WRITE) &&
1780 p->first_tb) {
d720b93d 1781 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1782 }
1783 p->flags = flags;
1784 }
1785 spin_unlock(&tb_lock);
33417e70
FB
1786}
1787
9fa3e853
FB
1788/* called from signal handler: invalidate the code and unprotect the
 1789 page. Return TRUE if the fault was successfully handled. */
53a5960a 1790int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1791{
1792 unsigned int page_index, prot, pindex;
1793 PageDesc *p, *p1;
53a5960a 1794 target_ulong host_start, host_end, addr;
9fa3e853 1795
83fb7adf 1796 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1797 page_index = host_start >> TARGET_PAGE_BITS;
1798 p1 = page_find(page_index);
1799 if (!p1)
1800 return 0;
83fb7adf 1801 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1802 p = p1;
1803 prot = 0;
1804 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1805 prot |= p->flags;
1806 p++;
1807 }
1808 /* if the page was really writable, then we change its
1809 protection back to writable */
1810 if (prot & PAGE_WRITE_ORG) {
1811 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1812 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1813 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1814 (prot & PAGE_BITS) | PAGE_WRITE);
1815 p1[pindex].flags |= PAGE_WRITE;
1816 /* and since the content will be modified, we must invalidate
1817 the corresponding translated code. */
d720b93d 1818 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1819#ifdef DEBUG_TB_CHECK
1820 tb_invalidate_check(address);
1821#endif
1822 return 1;
1823 }
1824 }
1825 return 0;
1826}
1827
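
/* Illustrative sketch (not part of the original code): a host SIGSEGV
   handler in user-mode emulation can try page_unprotect() first and
   only report the fault to the guest when it returns 0. fault_addr,
   host_pc and puc are hypothetical names for values taken from the
   signal context.

       if (page_unprotect(fault_addr, host_pc, puc))
           return 1;

   A non-zero return means the write hit a page that was only protected
   to guard translated code; the faulting instruction can simply be
   retried. A return of 0 means the fault is genuine and should be
   forwarded to the guest. */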
1828/* call this function when system calls directly modify a memory area */
53a5960a
PB
1829/* ??? This should be redundant now that we have lock_user. */
1830void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1831{
53a5960a 1832 target_ulong start, end, addr;
9fa3e853 1833
53a5960a 1834 start = data;
9fa3e853
FB
1835 end = start + data_size;
1836 start &= TARGET_PAGE_MASK;
1837 end = TARGET_PAGE_ALIGN(end);
1838 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1839 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1840 }
1841}
1842
6a00d601
FB
1843static inline void tlb_set_dirty(CPUState *env,
1844 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1845{
1846}
9fa3e853
FB
1847#endif /* defined(CONFIG_USER_ONLY) */
1848
33417e70
FB
1849/* register physical memory. 'size' must be a multiple of the target
1850 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1851 io memory page */
2e12669a
FB
1852void cpu_register_physical_memory(target_phys_addr_t start_addr,
1853 unsigned long size,
1854 unsigned long phys_offset)
33417e70 1855{
108c49b8 1856 target_phys_addr_t addr, end_addr;
92e873b9 1857 PhysPageDesc *p;
9d42037b 1858 CPUState *env;
33417e70 1859
5fd386f6 1860 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1861 end_addr = start_addr + size;
5fd386f6 1862 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1863 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1864 p->phys_offset = phys_offset;
2a4188a3
FB
1865 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1866 (phys_offset & IO_MEM_ROMD))
33417e70
FB
1867 phys_offset += TARGET_PAGE_SIZE;
1868 }
9d42037b
FB
1869
1870 /* since each CPU stores ram addresses in its TLB cache, we must
1871 reset the modified entries */
1872 /* XXX: slow ! */
1873 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 tlb_flush(env, 1);
1875 }
33417e70
FB
1876}
1877
ba863458
FB
1878/* XXX: temporary until new memory mapping API */
1879uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1880{
1881 PhysPageDesc *p;
1882
1883 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1884 if (!p)
1885 return IO_MEM_UNASSIGNED;
1886 return p->phys_offset;
1887}
1888
e9a1ab19
FB
1889/* XXX: better than nothing */
1890ram_addr_t qemu_ram_alloc(unsigned int size)
1891{
1892 ram_addr_t addr;
1893 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1894 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1895 size, phys_ram_size);
1896 abort();
1897 }
1898 addr = phys_ram_alloc_offset;
1899 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1900 return addr;
1901}
1902
1903void qemu_ram_free(ram_addr_t addr)
1904{
1905}
1906
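
/* Illustrative sketch (not part of the original code): a board or
   device model typically pairs qemu_ram_alloc() with
   cpu_register_physical_memory(). The base address and size below are
   made up for the example.

       ram_addr_t offset = qemu_ram_alloc(0x100000);
       cpu_register_physical_memory(0x10000000, 0x100000,
                                    offset | IO_MEM_RAM);

   Registering the same offset with IO_MEM_ROM instead makes the range
   read-only (stores are routed to the discarding ROM write handler),
   and a token returned by cpu_register_io_memory() maps the range to
   I/O callbacks. */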
a4193c8a 1907static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1908{
67d3b957
PB
1909#ifdef DEBUG_UNASSIGNED
1910 printf("Unassigned mem read 0x%08x\n", (int)addr);
1911#endif
33417e70
FB
1912 return 0;
1913}
1914
a4193c8a 1915static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1916{
67d3b957
PB
1917#ifdef DEBUG_UNASSIGNED
1918 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1919#endif
33417e70
FB
1920}
1921
1922static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1923 unassigned_mem_readb,
1924 unassigned_mem_readb,
1925 unassigned_mem_readb,
1926};
1927
1928static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1929 unassigned_mem_writeb,
1930 unassigned_mem_writeb,
1931 unassigned_mem_writeb,
1932};
1933
3a7d929e 1934static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1935{
3a7d929e
FB
1936 unsigned long ram_addr;
1937 int dirty_flags;
1938 ram_addr = addr - (unsigned long)phys_ram_base;
1939 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1940 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1941#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1942 tb_invalidate_phys_page_fast(ram_addr, 1);
1943 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1944#endif
3a7d929e 1945 }
c27004ec 1946 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
1947#ifdef USE_KQEMU
1948 if (cpu_single_env->kqemu_enabled &&
1949 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1950 kqemu_modify_page(cpu_single_env, ram_addr);
1951#endif
f23db169
FB
1952 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1953 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1954 /* we remove the notdirty callback only if the code has been
1955 flushed */
1956 if (dirty_flags == 0xff)
6a00d601 1957 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1958}
1959
3a7d929e 1960static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1961{
3a7d929e
FB
1962 unsigned long ram_addr;
1963 int dirty_flags;
1964 ram_addr = addr - (unsigned long)phys_ram_base;
1965 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1966 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1967#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1968 tb_invalidate_phys_page_fast(ram_addr, 2);
1969 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1970#endif
3a7d929e 1971 }
c27004ec 1972 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
1973#ifdef USE_KQEMU
1974 if (cpu_single_env->kqemu_enabled &&
1975 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1976 kqemu_modify_page(cpu_single_env, ram_addr);
1977#endif
f23db169
FB
1978 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1979 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1980 /* we remove the notdirty callback only if the code has been
1981 flushed */
1982 if (dirty_flags == 0xff)
6a00d601 1983 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1984}
1985
3a7d929e 1986static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1987{
3a7d929e
FB
1988 unsigned long ram_addr;
1989 int dirty_flags;
1990 ram_addr = addr - (unsigned long)phys_ram_base;
1991 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1992 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1993#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1994 tb_invalidate_phys_page_fast(ram_addr, 4);
1995 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1996#endif
3a7d929e 1997 }
c27004ec 1998 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
1999#ifdef USE_KQEMU
2000 if (cpu_single_env->kqemu_enabled &&
2001 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2002 kqemu_modify_page(cpu_single_env, ram_addr);
2003#endif
f23db169
FB
2004 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2005 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2006 /* we remove the notdirty callback only if the code has been
2007 flushed */
2008 if (dirty_flags == 0xff)
6a00d601 2009 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2010}
2011
3a7d929e 2012static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2013 NULL, /* never used */
2014 NULL, /* never used */
2015 NULL, /* never used */
2016};
2017
1ccde1cb
FB
2018static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2019 notdirty_mem_writeb,
2020 notdirty_mem_writew,
2021 notdirty_mem_writel,
2022};
2023
6658ffb8
PB
2024#if defined(CONFIG_SOFTMMU)
2025/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2026 so these check for a hit then pass through to the normal out-of-line
2027 phys routines. */
2028static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2029{
2030 return ldub_phys(addr);
2031}
2032
2033static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2034{
2035 return lduw_phys(addr);
2036}
2037
2038static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2039{
2040 return ldl_phys(addr);
2041}
2042
2043/* Generate a debug exception if a watchpoint has been hit.
2044 Returns the real physical address of the access. addr will be a host
2045 address in the is_ram case. */
2046static target_ulong check_watchpoint(target_phys_addr_t addr)
2047{
2048 CPUState *env = cpu_single_env;
2049 target_ulong watch;
2050 target_ulong retaddr;
2051 int i;
2052
2053 retaddr = addr;
2054 for (i = 0; i < env->nb_watchpoints; i++) {
2055 watch = env->watchpoint[i].vaddr;
2056 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2057 if (env->watchpoint[i].is_ram)
2058 retaddr = addr - (unsigned long)phys_ram_base;
2059 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2060 cpu_single_env->watchpoint_hit = i + 1;
2061 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2062 break;
2063 }
2064 }
2065 }
2066 return retaddr;
2067}
2068
2069static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2070 uint32_t val)
2071{
2072 addr = check_watchpoint(addr);
2073 stb_phys(addr, val);
2074}
2075
2076static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2077 uint32_t val)
2078{
2079 addr = check_watchpoint(addr);
2080 stw_phys(addr, val);
2081}
2082
2083static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2084 uint32_t val)
2085{
2086 addr = check_watchpoint(addr);
2087 stl_phys(addr, val);
2088}
2089
2090static CPUReadMemoryFunc *watch_mem_read[3] = {
2091 watch_mem_readb,
2092 watch_mem_readw,
2093 watch_mem_readl,
2094};
2095
2096static CPUWriteMemoryFunc *watch_mem_write[3] = {
2097 watch_mem_writeb,
2098 watch_mem_writew,
2099 watch_mem_writel,
2100};
2101#endif
2102
33417e70
FB
2103static void io_mem_init(void)
2104{
3a7d929e 2105 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2106 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2107 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2108 io_mem_nb = 5;
2109
6658ffb8
PB
2110#if defined(CONFIG_SOFTMMU)
2111 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2112 watch_mem_write, NULL);
2113#endif
1ccde1cb 2114 /* alloc dirty bits array */
0a962c02 2115 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2116 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2117}
2118
2119/* mem_read and mem_write are arrays of functions containing the
2120 function to access byte (index 0), word (index 1) and dword (index
2121   2). All functions must be supplied. If io_index is positive, the
2122   corresponding io zone is modified. If it is zero or negative, a new
2123   io zone is allocated. The return value can be used with
2124   cpu_register_physical_memory(); -1 is returned on error. */
2125int cpu_register_io_memory(int io_index,
2126 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2127 CPUWriteMemoryFunc **mem_write,
2128 void *opaque)
33417e70
FB
2129{
2130 int i;
2131
2132 if (io_index <= 0) {
b5ff1b31 2133 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2134 return -1;
2135 io_index = io_mem_nb++;
2136 } else {
2137 if (io_index >= IO_MEM_NB_ENTRIES)
2138 return -1;
2139 }
b5ff1b31 2140
33417e70
FB
2141    for(i = 0; i < 3; i++) {
2142 io_mem_read[io_index][i] = mem_read[i];
2143 io_mem_write[io_index][i] = mem_write[i];
2144 }
a4193c8a 2145 io_mem_opaque[io_index] = opaque;
33417e70
FB
2146 return io_index << IO_MEM_SHIFT;
2147}
61382a50 2148
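
/* Illustrative sketch (not part of the original code): a device model
   supplies one callback per access size (byte, word, dword), registers
   them, and maps the returned token at its MMIO address. The mydev
   callbacks, the opaque pointer s and the addresses are hypothetical.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   Passing io_index == 0 (or any value <= 0) asks for a fresh slot; a
   positive io_index replaces the callbacks of an existing slot. */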
8926b517
FB
2149CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2150{
2151 return io_mem_write[io_index >> IO_MEM_SHIFT];
2152}
2153
2154CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2155{
2156 return io_mem_read[io_index >> IO_MEM_SHIFT];
2157}
2158
13eb76e0
FB
2159/* physical memory access (slow version, mainly for debug) */
2160#if defined(CONFIG_USER_ONLY)
2e12669a 2161void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2162 int len, int is_write)
2163{
2164 int l, flags;
2165 target_ulong page;
53a5960a 2166 void * p;
13eb76e0
FB
2167
2168 while (len > 0) {
2169 page = addr & TARGET_PAGE_MASK;
2170 l = (page + TARGET_PAGE_SIZE) - addr;
2171 if (l > len)
2172 l = len;
2173 flags = page_get_flags(page);
2174 if (!(flags & PAGE_VALID))
2175 return;
2176 if (is_write) {
2177 if (!(flags & PAGE_WRITE))
2178 return;
53a5960a
PB
2179 p = lock_user(addr, len, 0);
2180 memcpy(p, buf, len);
2181 unlock_user(p, addr, len);
13eb76e0
FB
2182 } else {
2183 if (!(flags & PAGE_READ))
2184 return;
53a5960a
PB
2185 p = lock_user(addr, len, 1);
2186 memcpy(buf, p, len);
2187 unlock_user(p, addr, 0);
13eb76e0
FB
2188 }
2189 len -= l;
2190 buf += l;
2191 addr += l;
2192 }
2193}
8df1cd07 2194
13eb76e0 2195#else
2e12669a 2196void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2197 int len, int is_write)
2198{
2199 int l, io_index;
2200 uint8_t *ptr;
2201 uint32_t val;
2e12669a
FB
2202 target_phys_addr_t page;
2203 unsigned long pd;
92e873b9 2204 PhysPageDesc *p;
13eb76e0
FB
2205
2206 while (len > 0) {
2207 page = addr & TARGET_PAGE_MASK;
2208 l = (page + TARGET_PAGE_SIZE) - addr;
2209 if (l > len)
2210 l = len;
92e873b9 2211 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2212 if (!p) {
2213 pd = IO_MEM_UNASSIGNED;
2214 } else {
2215 pd = p->phys_offset;
2216 }
2217
2218 if (is_write) {
3a7d929e 2219 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2221 /* XXX: could force cpu_single_env to NULL to avoid
2222 potential bugs */
13eb76e0 2223 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2224 /* 32 bit write access */
c27004ec 2225 val = ldl_p(buf);
a4193c8a 2226 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2227 l = 4;
2228 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2229 /* 16 bit write access */
c27004ec 2230 val = lduw_p(buf);
a4193c8a 2231 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2232 l = 2;
2233 } else {
1c213d19 2234 /* 8 bit write access */
c27004ec 2235 val = ldub_p(buf);
a4193c8a 2236 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2237 l = 1;
2238 }
2239 } else {
b448f2f3
FB
2240 unsigned long addr1;
2241 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2242 /* RAM case */
b448f2f3 2243 ptr = phys_ram_base + addr1;
13eb76e0 2244 memcpy(ptr, buf, l);
3a7d929e
FB
2245 if (!cpu_physical_memory_is_dirty(addr1)) {
2246 /* invalidate code */
2247 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2248 /* set dirty bit */
f23db169
FB
2249 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2250 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2251 }
13eb76e0
FB
2252 }
2253 } else {
2a4188a3
FB
2254 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2255 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2256 /* I/O case */
2257 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2258 if (l >= 4 && ((addr & 3) == 0)) {
2259 /* 32 bit read access */
a4193c8a 2260 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2261 stl_p(buf, val);
13eb76e0
FB
2262 l = 4;
2263 } else if (l >= 2 && ((addr & 1) == 0)) {
2264 /* 16 bit read access */
a4193c8a 2265 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2266 stw_p(buf, val);
13eb76e0
FB
2267 l = 2;
2268 } else {
1c213d19 2269 /* 8 bit read access */
a4193c8a 2270 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2271 stb_p(buf, val);
13eb76e0
FB
2272 l = 1;
2273 }
2274 } else {
2275 /* RAM case */
2276 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2277 (addr & ~TARGET_PAGE_MASK);
2278 memcpy(buf, ptr, l);
2279 }
2280 }
2281 len -= l;
2282 buf += l;
2283 addr += l;
2284 }
2285}
8df1cd07 2286
d0ecd2aa
FB
2287/* used for ROM loading: can write in RAM and ROM */
2288void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2289 const uint8_t *buf, int len)
2290{
2291 int l;
2292 uint8_t *ptr;
2293 target_phys_addr_t page;
2294 unsigned long pd;
2295 PhysPageDesc *p;
2296
2297 while (len > 0) {
2298 page = addr & TARGET_PAGE_MASK;
2299 l = (page + TARGET_PAGE_SIZE) - addr;
2300 if (l > len)
2301 l = len;
2302 p = phys_page_find(page >> TARGET_PAGE_BITS);
2303 if (!p) {
2304 pd = IO_MEM_UNASSIGNED;
2305 } else {
2306 pd = p->phys_offset;
2307 }
2308
2309 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2310 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2311 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2312 /* do nothing */
2313 } else {
2314 unsigned long addr1;
2315 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2316 /* ROM/RAM case */
2317 ptr = phys_ram_base + addr1;
2318 memcpy(ptr, buf, l);
2319 }
2320 len -= l;
2321 buf += l;
2322 addr += l;
2323 }
2324}
2325
2326
8df1cd07
FB
2327/* warning: addr must be aligned */
2328uint32_t ldl_phys(target_phys_addr_t addr)
2329{
2330 int io_index;
2331 uint8_t *ptr;
2332 uint32_t val;
2333 unsigned long pd;
2334 PhysPageDesc *p;
2335
2336 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2337 if (!p) {
2338 pd = IO_MEM_UNASSIGNED;
2339 } else {
2340 pd = p->phys_offset;
2341 }
2342
2a4188a3
FB
2343 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2344 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2345 /* I/O case */
2346 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2347 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2348 } else {
2349 /* RAM case */
2350 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2351 (addr & ~TARGET_PAGE_MASK);
2352 val = ldl_p(ptr);
2353 }
2354 return val;
2355}
2356
84b7b8e7
FB
2357/* warning: addr must be aligned */
2358uint64_t ldq_phys(target_phys_addr_t addr)
2359{
2360 int io_index;
2361 uint8_t *ptr;
2362 uint64_t val;
2363 unsigned long pd;
2364 PhysPageDesc *p;
2365
2366 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2367 if (!p) {
2368 pd = IO_MEM_UNASSIGNED;
2369 } else {
2370 pd = p->phys_offset;
2371 }
2372
2a4188a3
FB
2373 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2374 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2375 /* I/O case */
2376 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2377#ifdef TARGET_WORDS_BIGENDIAN
2378 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2379 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2380#else
2381 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2382 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2383#endif
2384 } else {
2385 /* RAM case */
2386 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2387 (addr & ~TARGET_PAGE_MASK);
2388 val = ldq_p(ptr);
2389 }
2390 return val;
2391}
2392
aab33094
FB
2393/* XXX: optimize */
2394uint32_t ldub_phys(target_phys_addr_t addr)
2395{
2396 uint8_t val;
2397 cpu_physical_memory_read(addr, &val, 1);
2398 return val;
2399}
2400
2401/* XXX: optimize */
2402uint32_t lduw_phys(target_phys_addr_t addr)
2403{
2404 uint16_t val;
2405 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2406 return tswap16(val);
2407}
2408
8df1cd07
FB
2409/* warning: addr must be aligned. The ram page is not marked as dirty
2410 and the code inside is not invalidated. It is useful if the dirty
2411 bits are used to track modified PTEs */
2412void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2413{
2414 int io_index;
2415 uint8_t *ptr;
2416 unsigned long pd;
2417 PhysPageDesc *p;
2418
2419 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2420 if (!p) {
2421 pd = IO_MEM_UNASSIGNED;
2422 } else {
2423 pd = p->phys_offset;
2424 }
2425
3a7d929e 2426 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2427 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2428 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2429 } else {
2430 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2431 (addr & ~TARGET_PAGE_MASK);
2432 stl_p(ptr, val);
2433 }
2434}
2435
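
/* Illustrative sketch (not part of the original code): a softmmu page
   table walker that sets an accessed or dirty bit in a guest PTE can
   use the _notdirty variant so that this internal update is not
   mistaken for a guest write to the page holding the PTE. pte_addr and
   the bit value 0x20 are made up for the example.

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & 0x20))
           stl_phys_notdirty(pte_addr, pte | 0x20);
*/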
bc98a7ef
JM
2436void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2437{
2438 int io_index;
2439 uint8_t *ptr;
2440 unsigned long pd;
2441 PhysPageDesc *p;
2442
2443 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2444 if (!p) {
2445 pd = IO_MEM_UNASSIGNED;
2446 } else {
2447 pd = p->phys_offset;
2448 }
2449
2450 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2451 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2452#ifdef TARGET_WORDS_BIGENDIAN
2453 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2454 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2455#else
2456 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2457 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2458#endif
2459 } else {
2460 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2461 (addr & ~TARGET_PAGE_MASK);
2462 stq_p(ptr, val);
2463 }
2464}
2465
8df1cd07 2466/* warning: addr must be aligned */
8df1cd07
FB
2467void stl_phys(target_phys_addr_t addr, uint32_t val)
2468{
2469 int io_index;
2470 uint8_t *ptr;
2471 unsigned long pd;
2472 PhysPageDesc *p;
2473
2474 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2475 if (!p) {
2476 pd = IO_MEM_UNASSIGNED;
2477 } else {
2478 pd = p->phys_offset;
2479 }
2480
3a7d929e 2481 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2482 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2483 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2484 } else {
2485 unsigned long addr1;
2486 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2487 /* RAM case */
2488 ptr = phys_ram_base + addr1;
2489 stl_p(ptr, val);
3a7d929e
FB
2490 if (!cpu_physical_memory_is_dirty(addr1)) {
2491 /* invalidate code */
2492 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2493 /* set dirty bit */
f23db169
FB
2494 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2495 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2496 }
8df1cd07
FB
2497 }
2498}
2499
aab33094
FB
2500/* XXX: optimize */
2501void stb_phys(target_phys_addr_t addr, uint32_t val)
2502{
2503 uint8_t v = val;
2504 cpu_physical_memory_write(addr, &v, 1);
2505}
2506
2507/* XXX: optimize */
2508void stw_phys(target_phys_addr_t addr, uint32_t val)
2509{
2510 uint16_t v = tswap16(val);
2511 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2512}
2513
2514/* XXX: optimize */
2515void stq_phys(target_phys_addr_t addr, uint64_t val)
2516{
2517 val = tswap64(val);
2518 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2519}
2520
13eb76e0
FB
2521#endif
2522
2523/* virtual memory access for debug */
b448f2f3
FB
2524int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2525 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2526{
2527 int l;
2528 target_ulong page, phys_addr;
2529
2530 while (len > 0) {
2531 page = addr & TARGET_PAGE_MASK;
2532 phys_addr = cpu_get_phys_page_debug(env, page);
2533 /* if no physical page mapped, return an error */
2534 if (phys_addr == -1)
2535 return -1;
2536 l = (page + TARGET_PAGE_SIZE) - addr;
2537 if (l > len)
2538 l = len;
b448f2f3
FB
2539 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2540 buf, l, is_write);
13eb76e0
FB
2541 len -= l;
2542 buf += l;
2543 addr += l;
2544 }
2545 return 0;
2546}
2547
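
/* Illustrative sketch (not part of the original code): a debugger
   front end such as the gdb stub can read guest virtual memory without
   going through the normal load path. guest_vaddr is a hypothetical
   address supplied by the debugger.

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, guest_vaddr, buf, sizeof(buf), 0) < 0)
           return -1;

   A negative return means some page in the range is not mapped.
   Passing is_write = 1 copies buf back into guest memory instead. */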
e3db7226
FB
2548void dump_exec_info(FILE *f,
2549 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2550{
2551 int i, target_code_size, max_target_code_size;
2552 int direct_jmp_count, direct_jmp2_count, cross_page;
2553 TranslationBlock *tb;
2554
2555 target_code_size = 0;
2556 max_target_code_size = 0;
2557 cross_page = 0;
2558 direct_jmp_count = 0;
2559 direct_jmp2_count = 0;
2560 for(i = 0; i < nb_tbs; i++) {
2561 tb = &tbs[i];
2562 target_code_size += tb->size;
2563 if (tb->size > max_target_code_size)
2564 max_target_code_size = tb->size;
2565 if (tb->page_addr[1] != -1)
2566 cross_page++;
2567 if (tb->tb_next_offset[0] != 0xffff) {
2568 direct_jmp_count++;
2569 if (tb->tb_next_offset[1] != 0xffff) {
2570 direct_jmp2_count++;
2571 }
2572 }
2573 }
2574    /* XXX: avoid using doubles? */
2575 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2576 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2577 nb_tbs ? target_code_size / nb_tbs : 0,
2578 max_target_code_size);
2579 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2580 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2581 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2582 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2583 cross_page,
2584 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2585 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2586 direct_jmp_count,
2587 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2588 direct_jmp2_count,
2589 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2590 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2591 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2592 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2593}
2594
61382a50
FB
2595#if !defined(CONFIG_USER_ONLY)
2596
2597#define MMUSUFFIX _cmmu
2598#define GETPC() NULL
2599#define env cpu_single_env
b769d8fe 2600#define SOFTMMU_CODE_ACCESS
61382a50
FB
2601
2602#define SHIFT 0
2603#include "softmmu_template.h"
2604
2605#define SHIFT 1
2606#include "softmmu_template.h"
2607
2608#define SHIFT 2
2609#include "softmmu_template.h"
2610
2611#define SHIFT 3
2612#include "softmmu_template.h"
2613
2614#undef env
2615
2616#endif