git.proxmox.com Git - qemu.git / blame - exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c
FB
21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c
FB
25#include <sys/mman.h>
26#endif
54936004
FB
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181
FB
35#include "cpu.h"
36#include "exec-all.h"
53a5960a
PB
37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
67d3b957 44//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
45
46/* make various TB consistency checks */
47//#define DEBUG_TB_CHECK
98857888 48//#define DEBUG_TLB_CHECK
fd6ce8f6 49
99773bd4
PB
50#if !defined(CONFIG_USER_ONLY)
51/* TB consistency checks only implemented for usermode emulation. */
52#undef DEBUG_TB_CHECK
53#endif
54
fd6ce8f6
FB
55/* threshold to flush the translated code buffer */
56#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57
9fa3e853
FB
58#define SMC_BITMAP_USE_THRESHOLD 10
59
60#define MMAP_AREA_START 0x00000000
61#define MMAP_AREA_END 0xa8000000
fd6ce8f6 62
108c49b8
FB
63#if defined(TARGET_SPARC64)
64#define TARGET_PHYS_ADDR_SPACE_BITS 41
65#elif defined(TARGET_PPC64)
66#define TARGET_PHYS_ADDR_SPACE_BITS 42
67#else
68/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69#define TARGET_PHYS_ADDR_SPACE_BITS 32
70#endif
71
fd6ce8f6 72TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 73TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 74int nb_tbs;
eb51d102
FB
75/* any access to the tbs or the page table must use this lock */
76spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 77
b8076a74 78uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
79uint8_t *code_gen_ptr;
80
9fa3e853
FB
81int phys_ram_size;
82int phys_ram_fd;
83uint8_t *phys_ram_base;
1ccde1cb 84uint8_t *phys_ram_dirty;
e9a1ab19 85static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 86
6a00d601
FB
87CPUState *first_cpu;
88/* current CPU in the current thread. It is only valid inside
89 cpu_exec() */
90CPUState *cpu_single_env;
91
54936004 92typedef struct PageDesc {
92e873b9 93 /* list of TBs intersecting this ram page */
fd6ce8f6 94 TranslationBlock *first_tb;
9fa3e853
FB
 95    /* in order to optimize self modifying code, we count the number
 96       of write accesses to a given page; past a threshold we use a bitmap */
97 unsigned int code_write_count;
98 uint8_t *code_bitmap;
99#if defined(CONFIG_USER_ONLY)
100 unsigned long flags;
101#endif
54936004
FB
102} PageDesc;
103
92e873b9
FB
104typedef struct PhysPageDesc {
105 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 106 uint32_t phys_offset;
92e873b9
FB
107} PhysPageDesc;
108
54936004
FB
109#define L2_BITS 10
110#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
111
112#define L1_SIZE (1 << L1_BITS)
113#define L2_SIZE (1 << L2_BITS)
114
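/* Illustrative sketch (assuming TARGET_PAGE_BITS == 12, so L1_BITS == 10):
   a 32-bit virtual page index splits into two table slots, roughly

       l1_index = index >> L2_BITS;         // selects a PageDesc array in l1_map
       l2_index = index & (L2_SIZE - 1);    // selects the entry inside it
       p = l1_map[l1_index] + l2_index;     // cf. page_find() below

   targets with a different page size change L1_BITS accordingly. */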
33417e70 115static void io_mem_init(void);
fd6ce8f6 116
83fb7adf
FB
117unsigned long qemu_real_host_page_size;
118unsigned long qemu_host_page_bits;
119unsigned long qemu_host_page_size;
120unsigned long qemu_host_page_mask;
54936004 121
92e873b9 122/* XXX: for system emulation, it could just be an array */
54936004 123static PageDesc *l1_map[L1_SIZE];
0a962c02 124PhysPageDesc **l1_phys_map;
54936004 125
33417e70 126/* io memory support */
33417e70
FB
127CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
128CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 129void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70
FB
130static int io_mem_nb;
131
34865134
FB
132/* log support */
133char *logfilename = "/tmp/qemu.log";
134FILE *logfile;
135int loglevel;
136
e3db7226
FB
137/* statistics */
138static int tlb_flush_count;
139static int tb_flush_count;
140static int tb_phys_invalidate_count;
141
b346ff46 142static void page_init(void)
54936004 143{
83fb7adf 144 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 145 TARGET_PAGE_SIZE */
67b915a5 146#ifdef _WIN32
d5a8f07c
FB
147 {
148 SYSTEM_INFO system_info;
149 DWORD old_protect;
150
151 GetSystemInfo(&system_info);
152 qemu_real_host_page_size = system_info.dwPageSize;
153
154 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
155 PAGE_EXECUTE_READWRITE, &old_protect);
156 }
67b915a5 157#else
83fb7adf 158 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
159 {
160 unsigned long start, end;
161
162 start = (unsigned long)code_gen_buffer;
163 start &= ~(qemu_real_host_page_size - 1);
164
165 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
166 end += qemu_real_host_page_size - 1;
167 end &= ~(qemu_real_host_page_size - 1);
168
169 mprotect((void *)start, end - start,
170 PROT_READ | PROT_WRITE | PROT_EXEC);
171 }
67b915a5 172#endif
d5a8f07c 173
83fb7adf
FB
174 if (qemu_host_page_size == 0)
175 qemu_host_page_size = qemu_real_host_page_size;
176 if (qemu_host_page_size < TARGET_PAGE_SIZE)
177 qemu_host_page_size = TARGET_PAGE_SIZE;
178 qemu_host_page_bits = 0;
179 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
180 qemu_host_page_bits++;
181 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
182 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
183 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004
FB
184}
185
fd6ce8f6 186static inline PageDesc *page_find_alloc(unsigned int index)
54936004 187{
54936004
FB
188 PageDesc **lp, *p;
189
54936004
FB
190 lp = &l1_map[index >> L2_BITS];
191 p = *lp;
192 if (!p) {
193 /* allocate if not found */
59817ccb 194 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 195 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
196 *lp = p;
197 }
198 return p + (index & (L2_SIZE - 1));
199}
200
fd6ce8f6 201static inline PageDesc *page_find(unsigned int index)
54936004 202{
54936004
FB
203 PageDesc *p;
204
54936004
FB
205 p = l1_map[index >> L2_BITS];
206 if (!p)
207 return 0;
fd6ce8f6
FB
208 return p + (index & (L2_SIZE - 1));
209}
210
108c49b8 211static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 212{
108c49b8 213 void **lp, **p;
e3f4e2a4 214 PhysPageDesc *pd;
92e873b9 215
108c49b8
FB
216 p = (void **)l1_phys_map;
217#if TARGET_PHYS_ADDR_SPACE_BITS > 32
218
219#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
220#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
221#endif
222 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
223 p = *lp;
224 if (!p) {
225 /* allocate if not found */
108c49b8
FB
226 if (!alloc)
227 return NULL;
228 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
229 memset(p, 0, sizeof(void *) * L1_SIZE);
230 *lp = p;
231 }
232#endif
233 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
234 pd = *lp;
235 if (!pd) {
236 int i;
108c49b8
FB
237 /* allocate if not found */
238 if (!alloc)
239 return NULL;
e3f4e2a4
PB
240 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
241 *lp = pd;
242 for (i = 0; i < L2_SIZE; i++)
243 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 244 }
e3f4e2a4 245 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
246}
247
108c49b8 248static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 249{
108c49b8 250 return phys_page_find_alloc(index, 0);
92e873b9
FB
251}
252
9fa3e853 253#if !defined(CONFIG_USER_ONLY)
6a00d601 254static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
255static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
256 target_ulong vaddr);
9fa3e853 257#endif
fd6ce8f6 258
6a00d601 259void cpu_exec_init(CPUState *env)
fd6ce8f6 260{
6a00d601
FB
261 CPUState **penv;
262 int cpu_index;
263
fd6ce8f6
FB
264 if (!code_gen_ptr) {
265 code_gen_ptr = code_gen_buffer;
b346ff46 266 page_init();
33417e70 267 io_mem_init();
fd6ce8f6 268 }
6a00d601
FB
269 env->next_cpu = NULL;
270 penv = &first_cpu;
271 cpu_index = 0;
272 while (*penv != NULL) {
273 penv = (CPUState **)&(*penv)->next_cpu;
274 cpu_index++;
275 }
276 env->cpu_index = cpu_index;
277 *penv = env;
fd6ce8f6
FB
278}
279
9fa3e853
FB
280static inline void invalidate_page_bitmap(PageDesc *p)
281{
282 if (p->code_bitmap) {
59817ccb 283 qemu_free(p->code_bitmap);
9fa3e853
FB
284 p->code_bitmap = NULL;
285 }
286 p->code_write_count = 0;
287}
288
fd6ce8f6
FB
289/* set to NULL all the 'first_tb' fields in all PageDescs */
290static void page_flush_tb(void)
291{
292 int i, j;
293 PageDesc *p;
294
295 for(i = 0; i < L1_SIZE; i++) {
296 p = l1_map[i];
297 if (p) {
9fa3e853
FB
298 for(j = 0; j < L2_SIZE; j++) {
299 p->first_tb = NULL;
300 invalidate_page_bitmap(p);
301 p++;
302 }
fd6ce8f6
FB
303 }
304 }
305}
306
307/* flush all the translation blocks */
d4e8164f 308/* XXX: tb_flush is currently not thread safe */
6a00d601 309void tb_flush(CPUState *env1)
fd6ce8f6 310{
6a00d601 311 CPUState *env;
0124311e 312#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
313 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
314 code_gen_ptr - code_gen_buffer,
315 nb_tbs,
0124311e 316 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
317#endif
318 nb_tbs = 0;
6a00d601
FB
319
320 for(env = first_cpu; env != NULL; env = env->next_cpu) {
321 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
322 }
9fa3e853 323
8a8a608f 324 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 325 page_flush_tb();
9fa3e853 326
fd6ce8f6 327 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
328 /* XXX: flush processor icache at this point if cache flush is
329 expensive */
e3db7226 330 tb_flush_count++;
fd6ce8f6
FB
331}
332
333#ifdef DEBUG_TB_CHECK
334
335static void tb_invalidate_check(unsigned long address)
336{
337 TranslationBlock *tb;
338 int i;
339 address &= TARGET_PAGE_MASK;
99773bd4
PB
340 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
341 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
342 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
343 address >= tb->pc + tb->size)) {
344 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 345 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
346 }
347 }
348 }
349}
350
351/* verify that all the pages have correct rights for code */
352static void tb_page_check(void)
353{
354 TranslationBlock *tb;
355 int i, flags1, flags2;
356
99773bd4
PB
357 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
359 flags1 = page_get_flags(tb->pc);
360 flags2 = page_get_flags(tb->pc + tb->size - 1);
361 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
362 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 363 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
364 }
365 }
366 }
367}
368
d4e8164f
FB
369void tb_jmp_check(TranslationBlock *tb)
370{
371 TranslationBlock *tb1;
372 unsigned int n1;
373
374 /* suppress any remaining jumps to this TB */
375 tb1 = tb->jmp_first;
376 for(;;) {
377 n1 = (long)tb1 & 3;
378 tb1 = (TranslationBlock *)((long)tb1 & ~3);
379 if (n1 == 2)
380 break;
381 tb1 = tb1->jmp_next[n1];
382 }
383 /* check end of list */
384 if (tb1 != tb) {
385 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
386 }
387}
388
fd6ce8f6
FB
389#endif
390
391/* invalidate one TB */
392static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
393 int next_offset)
394{
395 TranslationBlock *tb1;
396 for(;;) {
397 tb1 = *ptb;
398 if (tb1 == tb) {
399 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
400 break;
401 }
402 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
403 }
404}
405
9fa3e853
FB
406static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
407{
408 TranslationBlock *tb1;
409 unsigned int n1;
410
411 for(;;) {
412 tb1 = *ptb;
413 n1 = (long)tb1 & 3;
414 tb1 = (TranslationBlock *)((long)tb1 & ~3);
415 if (tb1 == tb) {
416 *ptb = tb1->page_next[n1];
417 break;
418 }
419 ptb = &tb1->page_next[n1];
420 }
421}
422
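/* Note (inferred from the code above, not an original comment): pointers in
   the page_next[] and jmp_* lists carry a tag in their two low bits -- 0 or 1
   names which of the TB's two pages (or jump slots) the link belongs to, and
   2 marks the head/end of the circular list, e.g.

       p->first_tb = (TranslationBlock *)((long)tb | n);    // n = 0 or 1
       tb->jmp_first = (TranslationBlock *)((long)tb | 2);  // terminator

   which is why readers mask with ~3 before dereferencing. */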
d4e8164f
FB
423static inline void tb_jmp_remove(TranslationBlock *tb, int n)
424{
425 TranslationBlock *tb1, **ptb;
426 unsigned int n1;
427
428 ptb = &tb->jmp_next[n];
429 tb1 = *ptb;
430 if (tb1) {
431 /* find tb(n) in circular list */
432 for(;;) {
433 tb1 = *ptb;
434 n1 = (long)tb1 & 3;
435 tb1 = (TranslationBlock *)((long)tb1 & ~3);
436 if (n1 == n && tb1 == tb)
437 break;
438 if (n1 == 2) {
439 ptb = &tb1->jmp_first;
440 } else {
441 ptb = &tb1->jmp_next[n1];
442 }
443 }
444 /* now we can suppress tb(n) from the list */
445 *ptb = tb->jmp_next[n];
446
447 tb->jmp_next[n] = NULL;
448 }
449}
450
451/* reset the jump entry 'n' of a TB so that it is not chained to
452 another TB */
453static inline void tb_reset_jump(TranslationBlock *tb, int n)
454{
455 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
456}
457
8a40a180 458static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 459{
6a00d601 460 CPUState *env;
8a40a180 461 PageDesc *p;
d4e8164f 462 unsigned int h, n1;
8a40a180
FB
463 target_ulong phys_pc;
464 TranslationBlock *tb1, *tb2;
d4e8164f 465
8a40a180
FB
466 /* remove the TB from the hash list */
467 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
468 h = tb_phys_hash_func(phys_pc);
469 tb_remove(&tb_phys_hash[h], tb,
470 offsetof(TranslationBlock, phys_hash_next));
471
472 /* remove the TB from the page list */
473 if (tb->page_addr[0] != page_addr) {
474 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
475 tb_page_remove(&p->first_tb, tb);
476 invalidate_page_bitmap(p);
477 }
478 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
479 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
480 tb_page_remove(&p->first_tb, tb);
481 invalidate_page_bitmap(p);
482 }
483
36bdbe54 484 tb_invalidated_flag = 1;
59817ccb 485
fd6ce8f6 486 /* remove the TB from the hash list */
8a40a180 487 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
488 for(env = first_cpu; env != NULL; env = env->next_cpu) {
489 if (env->tb_jmp_cache[h] == tb)
490 env->tb_jmp_cache[h] = NULL;
491 }
d4e8164f
FB
492
493 /* suppress this TB from the two jump lists */
494 tb_jmp_remove(tb, 0);
495 tb_jmp_remove(tb, 1);
496
497 /* suppress any remaining jumps to this TB */
498 tb1 = tb->jmp_first;
499 for(;;) {
500 n1 = (long)tb1 & 3;
501 if (n1 == 2)
502 break;
503 tb1 = (TranslationBlock *)((long)tb1 & ~3);
504 tb2 = tb1->jmp_next[n1];
505 tb_reset_jump(tb1, n1);
506 tb1->jmp_next[n1] = NULL;
507 tb1 = tb2;
508 }
509 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 510
e3db7226 511 tb_phys_invalidate_count++;
9fa3e853
FB
512}
513
514static inline void set_bits(uint8_t *tab, int start, int len)
515{
516 int end, mask, end1;
517
518 end = start + len;
519 tab += start >> 3;
520 mask = 0xff << (start & 7);
521 if ((start & ~7) == (end & ~7)) {
522 if (start < end) {
523 mask &= ~(0xff << (end & 7));
524 *tab |= mask;
525 }
526 } else {
527 *tab++ |= mask;
528 start = (start + 8) & ~7;
529 end1 = end & ~7;
530 while (start < end1) {
531 *tab++ = 0xff;
532 start += 8;
533 }
534 if (start < end) {
535 mask = ~(0xff << (end & 7));
536 *tab |= mask;
537 }
538 }
539}
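/* Usage sketch (illustrative, not part of the original file): set_bits()
   marks a run of bits that may span byte boundaries, e.g.

       uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };
       set_bits(bitmap, 4, 12);   // sets bits 4..15: bitmap[0] = 0xf0, bitmap[1] = 0xff

   build_page_bitmap() below uses it with one bit per byte of guest code. */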
540
541static void build_page_bitmap(PageDesc *p)
542{
543 int n, tb_start, tb_end;
544 TranslationBlock *tb;
545
59817ccb 546 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
547 if (!p->code_bitmap)
548 return;
549 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
550
551 tb = p->first_tb;
552 while (tb != NULL) {
553 n = (long)tb & 3;
554 tb = (TranslationBlock *)((long)tb & ~3);
555 /* NOTE: this is subtle as a TB may span two physical pages */
556 if (n == 0) {
557 /* NOTE: tb_end may be after the end of the page, but
558 it is not a problem */
559 tb_start = tb->pc & ~TARGET_PAGE_MASK;
560 tb_end = tb_start + tb->size;
561 if (tb_end > TARGET_PAGE_SIZE)
562 tb_end = TARGET_PAGE_SIZE;
563 } else {
564 tb_start = 0;
565 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
566 }
567 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
568 tb = tb->page_next[n];
569 }
570}
571
d720b93d
FB
572#ifdef TARGET_HAS_PRECISE_SMC
573
574static void tb_gen_code(CPUState *env,
575 target_ulong pc, target_ulong cs_base, int flags,
576 int cflags)
577{
578 TranslationBlock *tb;
579 uint8_t *tc_ptr;
580 target_ulong phys_pc, phys_page2, virt_page2;
581 int code_gen_size;
582
c27004ec
FB
583 phys_pc = get_phys_addr_code(env, pc);
584 tb = tb_alloc(pc);
d720b93d
FB
585 if (!tb) {
586 /* flush must be done */
587 tb_flush(env);
588 /* cannot fail at this point */
c27004ec 589 tb = tb_alloc(pc);
d720b93d
FB
590 }
591 tc_ptr = code_gen_ptr;
592 tb->tc_ptr = tc_ptr;
593 tb->cs_base = cs_base;
594 tb->flags = flags;
595 tb->cflags = cflags;
596 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
597 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
598
599 /* check next page if needed */
c27004ec 600 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 601 phys_page2 = -1;
c27004ec 602 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
603 phys_page2 = get_phys_addr_code(env, virt_page2);
604 }
605 tb_link_phys(tb, phys_pc, phys_page2);
606}
607#endif
608
9fa3e853
FB
609/* invalidate all TBs which intersect with the target physical page
610 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
611 the same physical page. 'is_cpu_write_access' should be true if called
612 from a real cpu write access: the virtual CPU will exit the current
613 TB if code is modified inside this TB. */
614void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
615 int is_cpu_write_access)
616{
617 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 618 CPUState *env = cpu_single_env;
9fa3e853 619 PageDesc *p;
ea1c1802 620 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 621 target_ulong tb_start, tb_end;
d720b93d 622 target_ulong current_pc, current_cs_base;
9fa3e853
FB
623
624 p = page_find(start >> TARGET_PAGE_BITS);
625 if (!p)
626 return;
627 if (!p->code_bitmap &&
d720b93d
FB
628 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
629 is_cpu_write_access) {
9fa3e853
FB
630 /* build code bitmap */
631 build_page_bitmap(p);
632 }
633
634 /* we remove all the TBs in the range [start, end[ */
635 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
636 current_tb_not_found = is_cpu_write_access;
637 current_tb_modified = 0;
638 current_tb = NULL; /* avoid warning */
639 current_pc = 0; /* avoid warning */
640 current_cs_base = 0; /* avoid warning */
641 current_flags = 0; /* avoid warning */
9fa3e853
FB
642 tb = p->first_tb;
643 while (tb != NULL) {
644 n = (long)tb & 3;
645 tb = (TranslationBlock *)((long)tb & ~3);
646 tb_next = tb->page_next[n];
647 /* NOTE: this is subtle as a TB may span two physical pages */
648 if (n == 0) {
649 /* NOTE: tb_end may be after the end of the page, but
650 it is not a problem */
651 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
652 tb_end = tb_start + tb->size;
653 } else {
654 tb_start = tb->page_addr[1];
655 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
656 }
657 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
658#ifdef TARGET_HAS_PRECISE_SMC
659 if (current_tb_not_found) {
660 current_tb_not_found = 0;
661 current_tb = NULL;
662 if (env->mem_write_pc) {
663 /* now we have a real cpu fault */
664 current_tb = tb_find_pc(env->mem_write_pc);
665 }
666 }
667 if (current_tb == tb &&
668 !(current_tb->cflags & CF_SINGLE_INSN)) {
669 /* If we are modifying the current TB, we must stop
670 its execution. We could be more precise by checking
671 that the modification is after the current PC, but it
672 would require a specialized function to partially
673 restore the CPU state */
674
675 current_tb_modified = 1;
676 cpu_restore_state(current_tb, env,
677 env->mem_write_pc, NULL);
678#if defined(TARGET_I386)
679 current_flags = env->hflags;
680 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
681 current_cs_base = (target_ulong)env->segs[R_CS].base;
682 current_pc = current_cs_base + env->eip;
683#else
684#error unsupported CPU
685#endif
686 }
687#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
688 /* we need to do that to handle the case where a signal
689 occurs while doing tb_phys_invalidate() */
690 saved_tb = NULL;
691 if (env) {
692 saved_tb = env->current_tb;
693 env->current_tb = NULL;
694 }
9fa3e853 695 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
696 if (env) {
697 env->current_tb = saved_tb;
698 if (env->interrupt_request && env->current_tb)
699 cpu_interrupt(env, env->interrupt_request);
700 }
9fa3e853
FB
701 }
702 tb = tb_next;
703 }
704#if !defined(CONFIG_USER_ONLY)
705 /* if no code remaining, no need to continue to use slow writes */
706 if (!p->first_tb) {
707 invalidate_page_bitmap(p);
d720b93d
FB
708 if (is_cpu_write_access) {
709 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
710 }
711 }
712#endif
713#ifdef TARGET_HAS_PRECISE_SMC
714 if (current_tb_modified) {
715 /* we generate a block containing just the instruction
716 modifying the memory. It will ensure that it cannot modify
717 itself */
ea1c1802 718 env->current_tb = NULL;
d720b93d
FB
719 tb_gen_code(env, current_pc, current_cs_base, current_flags,
720 CF_SINGLE_INSN);
721 cpu_resume_from_signal(env, NULL);
9fa3e853 722 }
fd6ce8f6 723#endif
9fa3e853 724}
fd6ce8f6 725
9fa3e853 726/* len must be <= 8 and start must be a multiple of len */
d720b93d 727static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
728{
729 PageDesc *p;
730 int offset, b;
59817ccb 731#if 0
a4193c8a
FB
732 if (1) {
733 if (loglevel) {
734 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
735 cpu_single_env->mem_write_vaddr, len,
736 cpu_single_env->eip,
737 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
738 }
59817ccb
FB
739 }
740#endif
9fa3e853
FB
741 p = page_find(start >> TARGET_PAGE_BITS);
742 if (!p)
743 return;
744 if (p->code_bitmap) {
745 offset = start & ~TARGET_PAGE_MASK;
746 b = p->code_bitmap[offset >> 3] >> (offset & 7);
747 if (b & ((1 << len) - 1))
748 goto do_invalidate;
749 } else {
750 do_invalidate:
d720b93d 751 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
752 }
753}
754
9fa3e853 755#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
756static void tb_invalidate_phys_page(target_ulong addr,
757 unsigned long pc, void *puc)
9fa3e853 758{
d720b93d
FB
759 int n, current_flags, current_tb_modified;
760 target_ulong current_pc, current_cs_base;
9fa3e853 761 PageDesc *p;
d720b93d
FB
762 TranslationBlock *tb, *current_tb;
763#ifdef TARGET_HAS_PRECISE_SMC
764 CPUState *env = cpu_single_env;
765#endif
9fa3e853
FB
766
767 addr &= TARGET_PAGE_MASK;
768 p = page_find(addr >> TARGET_PAGE_BITS);
769 if (!p)
770 return;
771 tb = p->first_tb;
d720b93d
FB
772 current_tb_modified = 0;
773 current_tb = NULL;
774 current_pc = 0; /* avoid warning */
775 current_cs_base = 0; /* avoid warning */
776 current_flags = 0; /* avoid warning */
777#ifdef TARGET_HAS_PRECISE_SMC
778 if (tb && pc != 0) {
779 current_tb = tb_find_pc(pc);
780 }
781#endif
9fa3e853
FB
782 while (tb != NULL) {
783 n = (long)tb & 3;
784 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
785#ifdef TARGET_HAS_PRECISE_SMC
786 if (current_tb == tb &&
787 !(current_tb->cflags & CF_SINGLE_INSN)) {
788 /* If we are modifying the current TB, we must stop
789 its execution. We could be more precise by checking
790 that the modification is after the current PC, but it
791 would require a specialized function to partially
792 restore the CPU state */
793
794 current_tb_modified = 1;
795 cpu_restore_state(current_tb, env, pc, puc);
796#if defined(TARGET_I386)
797 current_flags = env->hflags;
798 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
799 current_cs_base = (target_ulong)env->segs[R_CS].base;
800 current_pc = current_cs_base + env->eip;
801#else
802#error unsupported CPU
803#endif
804 }
805#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
806 tb_phys_invalidate(tb, addr);
807 tb = tb->page_next[n];
808 }
fd6ce8f6 809 p->first_tb = NULL;
d720b93d
FB
810#ifdef TARGET_HAS_PRECISE_SMC
811 if (current_tb_modified) {
812 /* we generate a block containing just the instruction
813 modifying the memory. It will ensure that it cannot modify
814 itself */
ea1c1802 815 env->current_tb = NULL;
d720b93d
FB
816 tb_gen_code(env, current_pc, current_cs_base, current_flags,
817 CF_SINGLE_INSN);
818 cpu_resume_from_signal(env, puc);
819 }
820#endif
fd6ce8f6 821}
9fa3e853 822#endif
fd6ce8f6
FB
823
824/* add the tb in the target page and protect it if necessary */
9fa3e853 825static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 826 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
827{
828 PageDesc *p;
9fa3e853
FB
829 TranslationBlock *last_first_tb;
830
831 tb->page_addr[n] = page_addr;
3a7d929e 832 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
833 tb->page_next[n] = p->first_tb;
834 last_first_tb = p->first_tb;
835 p->first_tb = (TranslationBlock *)((long)tb | n);
836 invalidate_page_bitmap(p);
fd6ce8f6 837
107db443 838#if defined(TARGET_HAS_SMC) || 1
d720b93d 839
9fa3e853 840#if defined(CONFIG_USER_ONLY)
fd6ce8f6 841 if (p->flags & PAGE_WRITE) {
53a5960a
PB
842 target_ulong addr;
843 PageDesc *p2;
9fa3e853
FB
844 int prot;
845
fd6ce8f6
FB
846 /* force the host page as non writable (writes will have a
847 page fault + mprotect overhead) */
53a5960a 848 page_addr &= qemu_host_page_mask;
fd6ce8f6 849 prot = 0;
53a5960a
PB
850 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
851 addr += TARGET_PAGE_SIZE) {
852
853 p2 = page_find (addr >> TARGET_PAGE_BITS);
854 if (!p2)
855 continue;
856 prot |= p2->flags;
857 p2->flags &= ~PAGE_WRITE;
858 page_get_flags(addr);
859 }
860 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
861 (prot & PAGE_BITS) & ~PAGE_WRITE);
862#ifdef DEBUG_TB_INVALIDATE
863 printf("protecting code page: 0x%08lx\n",
53a5960a 864 page_addr);
fd6ce8f6 865#endif
fd6ce8f6 866 }
9fa3e853
FB
867#else
868 /* if some code is already present, then the pages are already
869 protected. So we handle the case where only the first TB is
870 allocated in a physical page */
871 if (!last_first_tb) {
6a00d601 872 tlb_protect_code(page_addr);
9fa3e853
FB
873 }
874#endif
d720b93d
FB
875
876#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
877}
878
879/* Allocate a new translation block. Flush the translation buffer if
880 too many translation blocks or too much generated code. */
c27004ec 881TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
882{
883 TranslationBlock *tb;
fd6ce8f6
FB
884
885 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
886 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 887 return NULL;
fd6ce8f6
FB
888 tb = &tbs[nb_tbs++];
889 tb->pc = pc;
b448f2f3 890 tb->cflags = 0;
d4e8164f
FB
891 return tb;
892}
893
9fa3e853
FB
894/* add a new TB and link it to the physical page tables. phys_page2 is
895 (-1) to indicate that only one page contains the TB. */
896void tb_link_phys(TranslationBlock *tb,
897 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 898{
9fa3e853
FB
899 unsigned int h;
900 TranslationBlock **ptb;
901
902 /* add in the physical hash table */
903 h = tb_phys_hash_func(phys_pc);
904 ptb = &tb_phys_hash[h];
905 tb->phys_hash_next = *ptb;
906 *ptb = tb;
fd6ce8f6
FB
907
908 /* add in the page list */
9fa3e853
FB
909 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
910 if (phys_page2 != -1)
911 tb_alloc_page(tb, 1, phys_page2);
912 else
913 tb->page_addr[1] = -1;
9fa3e853 914
d4e8164f
FB
915 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
916 tb->jmp_next[0] = NULL;
917 tb->jmp_next[1] = NULL;
b448f2f3
FB
918#ifdef USE_CODE_COPY
919 tb->cflags &= ~CF_FP_USED;
920 if (tb->cflags & CF_TB_FP_USED)
921 tb->cflags |= CF_FP_USED;
922#endif
d4e8164f
FB
923
924 /* init original jump addresses */
925 if (tb->tb_next_offset[0] != 0xffff)
926 tb_reset_jump(tb, 0);
927 if (tb->tb_next_offset[1] != 0xffff)
928 tb_reset_jump(tb, 1);
8a40a180
FB
929
930#ifdef DEBUG_TB_CHECK
931 tb_page_check();
932#endif
fd6ce8f6
FB
933}
934
9fa3e853
FB
935/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
936 tb[1].tc_ptr. Return NULL if not found */
937TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 938{
9fa3e853
FB
939 int m_min, m_max, m;
940 unsigned long v;
941 TranslationBlock *tb;
a513fe19
FB
942
943 if (nb_tbs <= 0)
944 return NULL;
945 if (tc_ptr < (unsigned long)code_gen_buffer ||
946 tc_ptr >= (unsigned long)code_gen_ptr)
947 return NULL;
948 /* binary search (cf Knuth) */
949 m_min = 0;
950 m_max = nb_tbs - 1;
951 while (m_min <= m_max) {
952 m = (m_min + m_max) >> 1;
953 tb = &tbs[m];
954 v = (unsigned long)tb->tc_ptr;
955 if (v == tc_ptr)
956 return tb;
957 else if (tc_ptr < v) {
958 m_max = m - 1;
959 } else {
960 m_min = m + 1;
961 }
962 }
963 return &tbs[m_max];
964}
7501267e 965
ea041c0e
FB
966static void tb_reset_jump_recursive(TranslationBlock *tb);
967
968static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
969{
970 TranslationBlock *tb1, *tb_next, **ptb;
971 unsigned int n1;
972
973 tb1 = tb->jmp_next[n];
974 if (tb1 != NULL) {
975 /* find head of list */
976 for(;;) {
977 n1 = (long)tb1 & 3;
978 tb1 = (TranslationBlock *)((long)tb1 & ~3);
979 if (n1 == 2)
980 break;
981 tb1 = tb1->jmp_next[n1];
982 }
 983        /* we are now sure that tb jumps to tb1 */
984 tb_next = tb1;
985
986 /* remove tb from the jmp_first list */
987 ptb = &tb_next->jmp_first;
988 for(;;) {
989 tb1 = *ptb;
990 n1 = (long)tb1 & 3;
991 tb1 = (TranslationBlock *)((long)tb1 & ~3);
992 if (n1 == n && tb1 == tb)
993 break;
994 ptb = &tb1->jmp_next[n1];
995 }
996 *ptb = tb->jmp_next[n];
997 tb->jmp_next[n] = NULL;
998
999 /* suppress the jump to next tb in generated code */
1000 tb_reset_jump(tb, n);
1001
0124311e 1002 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1003 tb_reset_jump_recursive(tb_next);
1004 }
1005}
1006
1007static void tb_reset_jump_recursive(TranslationBlock *tb)
1008{
1009 tb_reset_jump_recursive2(tb, 0);
1010 tb_reset_jump_recursive2(tb, 1);
1011}
1012
1fddef4b 1013#if defined(TARGET_HAS_ICE)
d720b93d
FB
1014static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1015{
c2f07f81
PB
1016 target_ulong addr, pd;
1017 ram_addr_t ram_addr;
1018 PhysPageDesc *p;
d720b93d 1019
c2f07f81
PB
1020 addr = cpu_get_phys_page_debug(env, pc);
1021 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1022 if (!p) {
1023 pd = IO_MEM_UNASSIGNED;
1024 } else {
1025 pd = p->phys_offset;
1026 }
1027 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1028 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1029}
c27004ec 1030#endif
d720b93d 1031
c33a346e
FB
1032/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1033 breakpoint is reached */
2e12669a 1034int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1035{
1fddef4b 1036#if defined(TARGET_HAS_ICE)
4c3a88a2 1037 int i;
d720b93d 1038
4c3a88a2
FB
1039 for(i = 0; i < env->nb_breakpoints; i++) {
1040 if (env->breakpoints[i] == pc)
1041 return 0;
1042 }
1043
1044 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1045 return -1;
1046 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1047
1048 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1049 return 0;
1050#else
1051 return -1;
1052#endif
1053}
1054
1055/* remove a breakpoint */
2e12669a 1056int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1057{
1fddef4b 1058#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1059 int i;
1060 for(i = 0; i < env->nb_breakpoints; i++) {
1061 if (env->breakpoints[i] == pc)
1062 goto found;
1063 }
1064 return -1;
1065 found:
4c3a88a2 1066 env->nb_breakpoints--;
1fddef4b
FB
1067 if (i < env->nb_breakpoints)
1068 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1069
1070 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1071 return 0;
1072#else
1073 return -1;
1074#endif
1075}
1076
c33a346e
FB
1077/* enable or disable single step mode. EXCP_DEBUG is returned by the
1078 CPU loop after each instruction */
1079void cpu_single_step(CPUState *env, int enabled)
1080{
1fddef4b 1081#if defined(TARGET_HAS_ICE)
c33a346e
FB
1082 if (env->singlestep_enabled != enabled) {
1083 env->singlestep_enabled = enabled;
 1084        /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1085 /* XXX: only flush what is necessary */
0124311e 1086 tb_flush(env);
c33a346e
FB
1087 }
1088#endif
1089}
1090
34865134
FB
 1091/* enable or disable low level logging */
1092void cpu_set_log(int log_flags)
1093{
1094 loglevel = log_flags;
1095 if (loglevel && !logfile) {
1096 logfile = fopen(logfilename, "w");
1097 if (!logfile) {
1098 perror(logfilename);
1099 _exit(1);
1100 }
9fa3e853
FB
1101#if !defined(CONFIG_SOFTMMU)
1102 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1103 {
1104 static uint8_t logfile_buf[4096];
1105 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1106 }
1107#else
34865134 1108 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1109#endif
34865134
FB
1110 }
1111}
1112
1113void cpu_set_log_filename(const char *filename)
1114{
1115 logfilename = strdup(filename);
1116}
c33a346e 1117
0124311e 1118/* mask must never be zero, except for A20 change call */
68a79315 1119void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1120{
1121 TranslationBlock *tb;
ee8b7021 1122 static int interrupt_lock;
59817ccb 1123
68a79315 1124 env->interrupt_request |= mask;
ea041c0e
FB
1125 /* if the cpu is currently executing code, we must unlink it and
 1126       all the potentially executing TBs */
1127 tb = env->current_tb;
ee8b7021
FB
1128 if (tb && !testandset(&interrupt_lock)) {
1129 env->current_tb = NULL;
ea041c0e 1130 tb_reset_jump_recursive(tb);
ee8b7021 1131 interrupt_lock = 0;
ea041c0e
FB
1132 }
1133}
1134
b54ad049
FB
1135void cpu_reset_interrupt(CPUState *env, int mask)
1136{
1137 env->interrupt_request &= ~mask;
1138}
1139
f193c797
FB
1140CPULogItem cpu_log_items[] = {
1141 { CPU_LOG_TB_OUT_ASM, "out_asm",
1142 "show generated host assembly code for each compiled TB" },
1143 { CPU_LOG_TB_IN_ASM, "in_asm",
1144 "show target assembly code for each compiled TB" },
1145 { CPU_LOG_TB_OP, "op",
1146 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1147#ifdef TARGET_I386
1148 { CPU_LOG_TB_OP_OPT, "op_opt",
1149 "show micro ops after optimization for each compiled TB" },
1150#endif
1151 { CPU_LOG_INT, "int",
1152 "show interrupts/exceptions in short format" },
1153 { CPU_LOG_EXEC, "exec",
1154 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1155 { CPU_LOG_TB_CPU, "cpu",
1156 "show CPU state before bloc translation" },
f193c797
FB
1157#ifdef TARGET_I386
1158 { CPU_LOG_PCALL, "pcall",
1159 "show protected mode far calls/returns/exceptions" },
1160#endif
8e3a9fd2 1161#ifdef DEBUG_IOPORT
fd872598
FB
1162 { CPU_LOG_IOPORT, "ioport",
1163 "show all i/o ports accesses" },
8e3a9fd2 1164#endif
f193c797
FB
1165 { 0, NULL, NULL },
1166};
1167
1168static int cmp1(const char *s1, int n, const char *s2)
1169{
1170 if (strlen(s2) != n)
1171 return 0;
1172 return memcmp(s1, s2, n) == 0;
1173}
1174
1175/* takes a comma separated list of log masks. Return 0 if error. */
1176int cpu_str_to_log_mask(const char *str)
1177{
1178 CPULogItem *item;
1179 int mask;
1180 const char *p, *p1;
1181
1182 p = str;
1183 mask = 0;
1184 for(;;) {
1185 p1 = strchr(p, ',');
1186 if (!p1)
1187 p1 = p + strlen(p);
8e3a9fd2
FB
1188 if(cmp1(p,p1-p,"all")) {
1189 for(item = cpu_log_items; item->mask != 0; item++) {
1190 mask |= item->mask;
1191 }
1192 } else {
f193c797
FB
1193 for(item = cpu_log_items; item->mask != 0; item++) {
1194 if (cmp1(p, p1 - p, item->name))
1195 goto found;
1196 }
1197 return 0;
8e3a9fd2 1198 }
f193c797
FB
1199 found:
1200 mask |= item->mask;
1201 if (*p1 != ',')
1202 break;
1203 p = p1 + 1;
1204 }
1205 return mask;
1206}
ea041c0e 1207
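/* Usage sketch (illustrative): parsing a -d option value, e.g.

       mask = cpu_str_to_log_mask("in_asm,exec");

   returns CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, and the special name "all"
   enables every entry of cpu_log_items[]. */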
7501267e
FB
1208void cpu_abort(CPUState *env, const char *fmt, ...)
1209{
1210 va_list ap;
1211
1212 va_start(ap, fmt);
1213 fprintf(stderr, "qemu: fatal: ");
1214 vfprintf(stderr, fmt, ap);
1215 fprintf(stderr, "\n");
1216#ifdef TARGET_I386
7fe48483
FB
1217 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1218#else
1219 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1220#endif
1221 va_end(ap);
1222 abort();
1223}
1224
0124311e
FB
1225#if !defined(CONFIG_USER_ONLY)
1226
ee8b7021
FB
1227/* NOTE: if flush_global is true, also flush global entries (not
1228 implemented yet) */
1229void tlb_flush(CPUState *env, int flush_global)
33417e70 1230{
33417e70 1231 int i;
0124311e 1232
9fa3e853
FB
1233#if defined(DEBUG_TLB)
1234 printf("tlb_flush:\n");
1235#endif
0124311e
FB
1236 /* must reset current TB so that interrupts cannot modify the
1237 links while we are modifying them */
1238 env->current_tb = NULL;
1239
33417e70 1240 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1241 env->tlb_table[0][i].addr_read = -1;
1242 env->tlb_table[0][i].addr_write = -1;
1243 env->tlb_table[0][i].addr_code = -1;
1244 env->tlb_table[1][i].addr_read = -1;
1245 env->tlb_table[1][i].addr_write = -1;
1246 env->tlb_table[1][i].addr_code = -1;
33417e70 1247 }
9fa3e853 1248
8a40a180 1249 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1250
1251#if !defined(CONFIG_SOFTMMU)
1252 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1253#endif
1254#ifdef USE_KQEMU
1255 if (env->kqemu_enabled) {
1256 kqemu_flush(env, flush_global);
1257 }
9fa3e853 1258#endif
e3db7226 1259 tlb_flush_count++;
33417e70
FB
1260}
1261
274da6b2 1262static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1263{
84b7b8e7
FB
1264 if (addr == (tlb_entry->addr_read &
1265 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1266 addr == (tlb_entry->addr_write &
1267 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1268 addr == (tlb_entry->addr_code &
1269 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1270 tlb_entry->addr_read = -1;
1271 tlb_entry->addr_write = -1;
1272 tlb_entry->addr_code = -1;
1273 }
61382a50
FB
1274}
1275
2e12669a 1276void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1277{
8a40a180 1278 int i;
9fa3e853 1279 TranslationBlock *tb;
0124311e 1280
9fa3e853 1281#if defined(DEBUG_TLB)
108c49b8 1282 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1283#endif
0124311e
FB
1284 /* must reset current TB so that interrupts cannot modify the
1285 links while we are modifying them */
1286 env->current_tb = NULL;
61382a50
FB
1287
1288 addr &= TARGET_PAGE_MASK;
1289 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1290 tlb_flush_entry(&env->tlb_table[0][i], addr);
1291 tlb_flush_entry(&env->tlb_table[1][i], addr);
0124311e 1292
b362e5e0
PB
1293 /* Discard jump cache entries for any tb which might potentially
1294 overlap the flushed page. */
1295 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1296 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1297
1298 i = tb_jmp_cache_hash_page(addr);
1299 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1300
0124311e 1301#if !defined(CONFIG_SOFTMMU)
9fa3e853 1302 if (addr < MMAP_AREA_END)
0124311e 1303 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1304#endif
0a962c02
FB
1305#ifdef USE_KQEMU
1306 if (env->kqemu_enabled) {
1307 kqemu_flush_page(env, addr);
1308 }
1309#endif
9fa3e853
FB
1310}
1311
9fa3e853
FB
1312/* update the TLBs so that writes to code in the virtual page 'addr'
1313 can be detected */
6a00d601 1314static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1315{
6a00d601
FB
1316 cpu_physical_memory_reset_dirty(ram_addr,
1317 ram_addr + TARGET_PAGE_SIZE,
1318 CODE_DIRTY_FLAG);
9fa3e853
FB
1319}
1320
9fa3e853 1321/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1322 tested for self modifying code */
1323static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1324 target_ulong vaddr)
9fa3e853 1325{
3a7d929e 1326 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1327}
1328
1329static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1330 unsigned long start, unsigned long length)
1331{
1332 unsigned long addr;
84b7b8e7
FB
1333 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1334 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1335 if ((addr - start) < length) {
84b7b8e7 1336 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1337 }
1338 }
1339}
1340
3a7d929e 1341void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1342 int dirty_flags)
1ccde1cb
FB
1343{
1344 CPUState *env;
4f2ac237 1345 unsigned long length, start1;
0a962c02
FB
1346 int i, mask, len;
1347 uint8_t *p;
1ccde1cb
FB
1348
1349 start &= TARGET_PAGE_MASK;
1350 end = TARGET_PAGE_ALIGN(end);
1351
1352 length = end - start;
1353 if (length == 0)
1354 return;
0a962c02 1355 len = length >> TARGET_PAGE_BITS;
3a7d929e 1356#ifdef USE_KQEMU
6a00d601
FB
1357 /* XXX: should not depend on cpu context */
1358 env = first_cpu;
3a7d929e 1359 if (env->kqemu_enabled) {
f23db169
FB
1360 ram_addr_t addr;
1361 addr = start;
1362 for(i = 0; i < len; i++) {
1363 kqemu_set_notdirty(env, addr);
1364 addr += TARGET_PAGE_SIZE;
1365 }
3a7d929e
FB
1366 }
1367#endif
f23db169
FB
1368 mask = ~dirty_flags;
1369 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1370 for(i = 0; i < len; i++)
1371 p[i] &= mask;
1372
1ccde1cb
FB
1373 /* we modify the TLB cache so that the dirty bit will be set again
1374 when accessing the range */
59817ccb 1375 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1376 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1377 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1378 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1379 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1380 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1381 }
59817ccb
FB
1382
1383#if !defined(CONFIG_SOFTMMU)
1384 /* XXX: this is expensive */
1385 {
1386 VirtPageDesc *p;
1387 int j;
1388 target_ulong addr;
1389
1390 for(i = 0; i < L1_SIZE; i++) {
1391 p = l1_virt_map[i];
1392 if (p) {
1393 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1394 for(j = 0; j < L2_SIZE; j++) {
1395 if (p->valid_tag == virt_valid_tag &&
1396 p->phys_addr >= start && p->phys_addr < end &&
1397 (p->prot & PROT_WRITE)) {
1398 if (addr < MMAP_AREA_END) {
1399 mprotect((void *)addr, TARGET_PAGE_SIZE,
1400 p->prot & ~PROT_WRITE);
1401 }
1402 }
1403 addr += TARGET_PAGE_SIZE;
1404 p++;
1405 }
1406 }
1407 }
1408 }
1409#endif
1ccde1cb
FB
1410}
1411
3a7d929e
FB
1412static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1413{
1414 ram_addr_t ram_addr;
1415
84b7b8e7
FB
1416 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1417 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1418 tlb_entry->addend - (unsigned long)phys_ram_base;
1419 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1420 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1421 }
1422 }
1423}
1424
1425/* update the TLB according to the current state of the dirty bits */
1426void cpu_tlb_update_dirty(CPUState *env)
1427{
1428 int i;
1429 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1430 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1431 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1432 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1433}
1434
1ccde1cb 1435static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1436 unsigned long start)
1ccde1cb
FB
1437{
1438 unsigned long addr;
84b7b8e7
FB
1439 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1440 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1441 if (addr == start) {
84b7b8e7 1442 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1443 }
1444 }
1445}
1446
1447/* update the TLB corresponding to virtual page vaddr and phys addr
1448 addr so that it is no longer dirty */
6a00d601
FB
1449static inline void tlb_set_dirty(CPUState *env,
1450 unsigned long addr, target_ulong vaddr)
1ccde1cb 1451{
1ccde1cb
FB
1452 int i;
1453
1ccde1cb
FB
1454 addr &= TARGET_PAGE_MASK;
1455 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1456 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1457 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1458}
1459
59817ccb
FB
1460/* add a new TLB entry. At most one entry for a given virtual address
1461 is permitted. Return 0 if OK or 2 if the page could not be mapped
1462 (can only happen in non SOFTMMU mode for I/O pages or pages
1463 conflicting with the host address space). */
84b7b8e7
FB
1464int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1465 target_phys_addr_t paddr, int prot,
1466 int is_user, int is_softmmu)
9fa3e853 1467{
92e873b9 1468 PhysPageDesc *p;
4f2ac237 1469 unsigned long pd;
9fa3e853 1470 unsigned int index;
4f2ac237 1471 target_ulong address;
108c49b8 1472 target_phys_addr_t addend;
9fa3e853 1473 int ret;
84b7b8e7 1474 CPUTLBEntry *te;
9fa3e853 1475
92e873b9 1476 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1477 if (!p) {
1478 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1479 } else {
1480 pd = p->phys_offset;
9fa3e853
FB
1481 }
1482#if defined(DEBUG_TLB)
3a7d929e 1483 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1484 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1485#endif
1486
1487 ret = 0;
1488#if !defined(CONFIG_SOFTMMU)
1489 if (is_softmmu)
1490#endif
1491 {
2a4188a3 1492 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1493 /* IO memory case */
1494 address = vaddr | pd;
1495 addend = paddr;
1496 } else {
1497 /* standard memory */
1498 address = vaddr;
1499 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1500 }
1501
90f18422 1502 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1503 addend -= vaddr;
84b7b8e7
FB
1504 te = &env->tlb_table[is_user][index];
1505 te->addend = addend;
67b915a5 1506 if (prot & PAGE_READ) {
84b7b8e7
FB
1507 te->addr_read = address;
1508 } else {
1509 te->addr_read = -1;
1510 }
1511 if (prot & PAGE_EXEC) {
1512 te->addr_code = address;
9fa3e853 1513 } else {
84b7b8e7 1514 te->addr_code = -1;
9fa3e853 1515 }
67b915a5 1516 if (prot & PAGE_WRITE) {
856074ec
FB
1517 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1518 (pd & IO_MEM_ROMD)) {
1519 /* write access calls the I/O callback */
1520 te->addr_write = vaddr |
1521 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1522 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1523 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1524 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1525 } else {
84b7b8e7 1526 te->addr_write = address;
9fa3e853
FB
1527 }
1528 } else {
84b7b8e7 1529 te->addr_write = -1;
9fa3e853
FB
1530 }
1531 }
1532#if !defined(CONFIG_SOFTMMU)
1533 else {
1534 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1535 /* IO access: no mapping is done as it will be handled by the
1536 soft MMU */
1537 if (!(env->hflags & HF_SOFTMMU_MASK))
1538 ret = 2;
1539 } else {
1540 void *map_addr;
59817ccb
FB
1541
1542 if (vaddr >= MMAP_AREA_END) {
1543 ret = 2;
1544 } else {
1545 if (prot & PROT_WRITE) {
1546 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1547#if defined(TARGET_HAS_SMC) || 1
59817ccb 1548 first_tb ||
d720b93d 1549#endif
59817ccb
FB
1550 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1551 !cpu_physical_memory_is_dirty(pd))) {
1552 /* ROM: we do as if code was inside */
1553 /* if code is present, we only map as read only and save the
1554 original mapping */
1555 VirtPageDesc *vp;
1556
90f18422 1557 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1558 vp->phys_addr = pd;
1559 vp->prot = prot;
1560 vp->valid_tag = virt_valid_tag;
1561 prot &= ~PAGE_WRITE;
1562 }
1563 }
1564 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1565 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1566 if (map_addr == MAP_FAILED) {
 1567                cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1568 paddr, vaddr);
9fa3e853 1569 }
9fa3e853
FB
1570 }
1571 }
1572 }
1573#endif
1574 return ret;
1575}
1576
1577/* called from signal handler: invalidate the code and unprotect the
 1578   page. Return TRUE if the fault was successfully handled. */
53a5960a 1579int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1580{
1581#if !defined(CONFIG_SOFTMMU)
1582 VirtPageDesc *vp;
1583
1584#if defined(DEBUG_TLB)
1585 printf("page_unprotect: addr=0x%08x\n", addr);
1586#endif
1587 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1588
1589 /* if it is not mapped, no need to worry here */
1590 if (addr >= MMAP_AREA_END)
1591 return 0;
9fa3e853
FB
1592 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1593 if (!vp)
1594 return 0;
1595 /* NOTE: in this case, validate_tag is _not_ tested as it
1596 validates only the code TLB */
1597 if (vp->valid_tag != virt_valid_tag)
1598 return 0;
1599 if (!(vp->prot & PAGE_WRITE))
1600 return 0;
1601#if defined(DEBUG_TLB)
1602 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1603 addr, vp->phys_addr, vp->prot);
1604#endif
59817ccb
FB
1605 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1606 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1607 (unsigned long)addr, vp->prot);
d720b93d 1608 /* set the dirty bit */
0a962c02 1609 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1610 /* flush the code inside */
1611 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1612 return 1;
1613#else
1614 return 0;
1615#endif
33417e70
FB
1616}
1617
0124311e
FB
1618#else
1619
ee8b7021 1620void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1621{
1622}
1623
2e12669a 1624void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1625{
1626}
1627
84b7b8e7
FB
1628int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1629 target_phys_addr_t paddr, int prot,
1630 int is_user, int is_softmmu)
9fa3e853
FB
1631{
1632 return 0;
1633}
0124311e 1634
9fa3e853
FB
1635/* dump memory mappings */
1636void page_dump(FILE *f)
33417e70 1637{
9fa3e853
FB
1638 unsigned long start, end;
1639 int i, j, prot, prot1;
1640 PageDesc *p;
33417e70 1641
9fa3e853
FB
1642 fprintf(f, "%-8s %-8s %-8s %s\n",
1643 "start", "end", "size", "prot");
1644 start = -1;
1645 end = -1;
1646 prot = 0;
1647 for(i = 0; i <= L1_SIZE; i++) {
1648 if (i < L1_SIZE)
1649 p = l1_map[i];
1650 else
1651 p = NULL;
1652 for(j = 0;j < L2_SIZE; j++) {
1653 if (!p)
1654 prot1 = 0;
1655 else
1656 prot1 = p[j].flags;
1657 if (prot1 != prot) {
1658 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1659 if (start != -1) {
1660 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1661 start, end, end - start,
1662 prot & PAGE_READ ? 'r' : '-',
1663 prot & PAGE_WRITE ? 'w' : '-',
1664 prot & PAGE_EXEC ? 'x' : '-');
1665 }
1666 if (prot1 != 0)
1667 start = end;
1668 else
1669 start = -1;
1670 prot = prot1;
1671 }
1672 if (!p)
1673 break;
1674 }
33417e70 1675 }
33417e70
FB
1676}
1677
53a5960a 1678int page_get_flags(target_ulong address)
33417e70 1679{
9fa3e853
FB
1680 PageDesc *p;
1681
1682 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1683 if (!p)
9fa3e853
FB
1684 return 0;
1685 return p->flags;
1686}
1687
1688/* modify the flags of a page and invalidate the code if
 1689   necessary. The flag PAGE_WRITE_ORG is positioned automatically
1690 depending on PAGE_WRITE */
53a5960a 1691void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1692{
1693 PageDesc *p;
53a5960a 1694 target_ulong addr;
9fa3e853
FB
1695
1696 start = start & TARGET_PAGE_MASK;
1697 end = TARGET_PAGE_ALIGN(end);
1698 if (flags & PAGE_WRITE)
1699 flags |= PAGE_WRITE_ORG;
1700 spin_lock(&tb_lock);
1701 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1702 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1703 /* if the write protection is set, then we invalidate the code
1704 inside */
1705 if (!(p->flags & PAGE_WRITE) &&
1706 (flags & PAGE_WRITE) &&
1707 p->first_tb) {
d720b93d 1708 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1709 }
1710 p->flags = flags;
1711 }
1712 spin_unlock(&tb_lock);
33417e70
FB
1713}
1714
9fa3e853
FB
1715/* called from signal handler: invalidate the code and unprotect the
 1716   page. Return TRUE if the fault was successfully handled. */
53a5960a 1717int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1718{
1719 unsigned int page_index, prot, pindex;
1720 PageDesc *p, *p1;
53a5960a 1721 target_ulong host_start, host_end, addr;
9fa3e853 1722
83fb7adf 1723 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1724 page_index = host_start >> TARGET_PAGE_BITS;
1725 p1 = page_find(page_index);
1726 if (!p1)
1727 return 0;
83fb7adf 1728 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1729 p = p1;
1730 prot = 0;
1731 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1732 prot |= p->flags;
1733 p++;
1734 }
1735 /* if the page was really writable, then we change its
1736 protection back to writable */
1737 if (prot & PAGE_WRITE_ORG) {
1738 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1739 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1740 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1741 (prot & PAGE_BITS) | PAGE_WRITE);
1742 p1[pindex].flags |= PAGE_WRITE;
1743 /* and since the content will be modified, we must invalidate
1744 the corresponding translated code. */
d720b93d 1745 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1746#ifdef DEBUG_TB_CHECK
1747 tb_invalidate_check(address);
1748#endif
1749 return 1;
1750 }
1751 }
1752 return 0;
1753}
1754
1755/* call this function when system calls directly modify a memory area */
53a5960a
PB
1756/* ??? This should be redundant now we have lock_user. */
1757void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1758{
53a5960a 1759 target_ulong start, end, addr;
9fa3e853 1760
53a5960a 1761 start = data;
9fa3e853
FB
1762 end = start + data_size;
1763 start &= TARGET_PAGE_MASK;
1764 end = TARGET_PAGE_ALIGN(end);
1765 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1766 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1767 }
1768}
1769
6a00d601
FB
1770static inline void tlb_set_dirty(CPUState *env,
1771 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1772{
1773}
9fa3e853
FB
1774#endif /* defined(CONFIG_USER_ONLY) */
1775
33417e70
FB
1776/* register physical memory. 'size' must be a multiple of the target
1777 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1778 io memory page */
2e12669a
FB
1779void cpu_register_physical_memory(target_phys_addr_t start_addr,
1780 unsigned long size,
1781 unsigned long phys_offset)
33417e70 1782{
108c49b8 1783 target_phys_addr_t addr, end_addr;
92e873b9 1784 PhysPageDesc *p;
9d42037b 1785 CPUState *env;
33417e70 1786
5fd386f6 1787 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1788 end_addr = start_addr + size;
5fd386f6 1789 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1790 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1791 p->phys_offset = phys_offset;
2a4188a3
FB
1792 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1793 (phys_offset & IO_MEM_ROMD))
33417e70
FB
1794 phys_offset += TARGET_PAGE_SIZE;
1795 }
9d42037b
FB
1796
1797 /* since each CPU stores ram addresses in its TLB cache, we must
1798 reset the modified entries */
1799 /* XXX: slow ! */
1800 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1801 tlb_flush(env, 1);
1802 }
33417e70
FB
1803}
1804
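/* Illustrative sketch of typical board-init usage (addresses and sizes are
   made up, not taken from this file): plain RAM and ROM pass an offset into
   phys_ram_base in phys_offset, while MMIO pages pass the token returned by
   cpu_register_io_memory(). */
static void example_register_memory(ram_addr_t ram_off, ram_addr_t rom_off)
{
    cpu_register_physical_memory(0x00000000, 0x00400000, ram_off | IO_MEM_RAM);
    cpu_register_physical_memory(0xfffc0000, 0x00040000, rom_off | IO_MEM_ROM);
}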
ba863458
FB
1805/* XXX: temporary until new memory mapping API */
1806uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1807{
1808 PhysPageDesc *p;
1809
1810 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1811 if (!p)
1812 return IO_MEM_UNASSIGNED;
1813 return p->phys_offset;
1814}
1815
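/* Illustrative sketch: a caller can use the descriptor returned above to
   tell RAM-backed pages from I/O pages; the bits outside TARGET_PAGE_MASK
   carry the io table index. The helper below is hypothetical. */
static int example_page_is_ram(target_phys_addr_t addr)
{
    uint32_t pd = cpu_get_physical_page_desc(addr);
    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}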
e9a1ab19
FB
1816/* XXX: better than nothing */
1817ram_addr_t qemu_ram_alloc(unsigned int size)
1818{
1819 ram_addr_t addr;
1820 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1821 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1822 size, phys_ram_size);
1823 abort();
1824 }
1825 addr = phys_ram_alloc_offset;
1826 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1827 return addr;
1828}
1829
1830void qemu_ram_free(ram_addr_t addr)
1831{
1832}
1833
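/* Illustrative sketch: qemu_ram_alloc() only hands out offsets into the
   single phys_ram_base block, so callers pair it with
   cpu_register_physical_memory(). The framebuffer address and size below
   are made up for illustration. */
static ram_addr_t example_alloc_and_map_vram(void)
{
    ram_addr_t vram_off = qemu_ram_alloc(0x20000); /* 128 KB */
    cpu_register_physical_memory(0x000a0000, 0x20000, vram_off | IO_MEM_RAM);
    return vram_off;
}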
a4193c8a 1834static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1835{
67d3b957
PB
1836#ifdef DEBUG_UNASSIGNED
1837 printf("Unassigned mem read 0x%08x\n", (int)addr);
1838#endif
33417e70
FB
1839 return 0;
1840}
1841
a4193c8a 1842static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1843{
67d3b957
PB
1844#ifdef DEBUG_UNASSIGNED
1845 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1846#endif
33417e70
FB
1847}
1848
1849static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1850 unassigned_mem_readb,
1851 unassigned_mem_readb,
1852 unassigned_mem_readb,
1853};
1854
1855static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1856 unassigned_mem_writeb,
1857 unassigned_mem_writeb,
1858 unassigned_mem_writeb,
1859};
1860
3a7d929e 1861static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1862{
3a7d929e
FB
1863 unsigned long ram_addr;
1864 int dirty_flags;
1865 ram_addr = addr - (unsigned long)phys_ram_base;
1866 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1867 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1868#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1869 tb_invalidate_phys_page_fast(ram_addr, 1);
1870 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1871#endif
3a7d929e 1872 }
c27004ec 1873 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
1874#ifdef USE_KQEMU
1875 if (cpu_single_env->kqemu_enabled &&
1876 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1877 kqemu_modify_page(cpu_single_env, ram_addr);
1878#endif
f23db169
FB
1879 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1880 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1881 /* we remove the notdirty callback only if the code has been
1882 flushed */
1883 if (dirty_flags == 0xff)
6a00d601 1884 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1885}
1886
3a7d929e 1887static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1888{
3a7d929e
FB
1889 unsigned long ram_addr;
1890 int dirty_flags;
1891 ram_addr = addr - (unsigned long)phys_ram_base;
1892 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1893 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1894#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1895 tb_invalidate_phys_page_fast(ram_addr, 2);
1896 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1897#endif
3a7d929e 1898 }
c27004ec 1899 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
1900#ifdef USE_KQEMU
1901 if (cpu_single_env->kqemu_enabled &&
1902 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1903 kqemu_modify_page(cpu_single_env, ram_addr);
1904#endif
f23db169
FB
1905 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1906 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1907 /* we remove the notdirty callback only if the code has been
1908 flushed */
1909 if (dirty_flags == 0xff)
6a00d601 1910 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1911}
1912
3a7d929e 1913static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1914{
3a7d929e
FB
1915 unsigned long ram_addr;
1916 int dirty_flags;
1917 ram_addr = addr - (unsigned long)phys_ram_base;
1918 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1919 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1920#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1921 tb_invalidate_phys_page_fast(ram_addr, 4);
1922 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1923#endif
3a7d929e 1924 }
c27004ec 1925 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
1926#ifdef USE_KQEMU
1927 if (cpu_single_env->kqemu_enabled &&
1928 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1929 kqemu_modify_page(cpu_single_env, ram_addr);
1930#endif
f23db169
FB
1931 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1932 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1933 /* we remove the notdirty callback only if the code has been
1934 flushed */
1935 if (dirty_flags == 0xff)
6a00d601 1936 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1937}
1938
3a7d929e 1939static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
1940 NULL, /* never used */
1941 NULL, /* never used */
1942 NULL, /* never used */
1943};
1944
1ccde1cb
FB
1945static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1946 notdirty_mem_writeb,
1947 notdirty_mem_writew,
1948 notdirty_mem_writel,
1949};
1950
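/* For reference, a sketch of the dirty-byte convention consumed above
   (hedged: the real helpers live in cpu-all.h of this vintage, not here):
   one dirty byte per RAM page, and a page only counts as fully dirty when
   every flag bit is set. While CODE_DIRTY_FLAG is clear, guest stores are
   routed through the notdirty handlers so translated code can be
   invalidated before the write lands. */
static inline int example_page_is_fully_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}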
33417e70
FB
1951static void io_mem_init(void)
1952{
3a7d929e 1953 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 1954 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 1955 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
1956 io_mem_nb = 5;
1957
1958 /* alloc dirty bits array */
0a962c02 1959 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 1960 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1961}
1962
1963/* mem_read and mem_write are arrays of functions containing the
1964 function to access byte (index 0), word (index 1) and dword (index
1965 2). All functions must be supplied. If io_index is non-zero, the
1966 corresponding io zone is modified. If it is zero, a new io zone is
1967 allocated. The return value can be used with
1968 cpu_register_physical_memory(). (-1) is returned on error. */
1969int cpu_register_io_memory(int io_index,
1970 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
1971 CPUWriteMemoryFunc **mem_write,
1972 void *opaque)
33417e70
FB
1973{
1974 int i;
1975
1976 if (io_index <= 0) {
b5ff1b31 1977 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
1978 return -1;
1979 io_index = io_mem_nb++;
1980 } else {
1981 if (io_index >= IO_MEM_NB_ENTRIES)
1982 return -1;
1983 }
b5ff1b31 1984
33417e70
FB
1985 for(i = 0;i < 3; i++) {
1986 io_mem_read[io_index][i] = mem_read[i];
1987 io_mem_write[io_index][i] = mem_write[i];
1988 }
a4193c8a 1989 io_mem_opaque[io_index] = opaque;
33417e70
FB
1990 return io_index << IO_MEM_SHIFT;
1991}
61382a50 1992
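/* Illustrative sketch of a device model using this API (every "mydev_*"
   name is hypothetical): supply byte/word/long handlers, then map the
   returned token over a physical address range. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_mydev_map(void *opaque)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0x10000000, 0x1000, io); /* 4 KB MMIO window */
}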
8926b517
FB
1993CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1994{
1995 return io_mem_write[io_index >> IO_MEM_SHIFT];
1996}
1997
1998CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1999{
2000 return io_mem_read[io_index >> IO_MEM_SHIFT];
2001}
2002
13eb76e0
FB
2003/* physical memory access (slow version, mainly for debug) */
2004#if defined(CONFIG_USER_ONLY)
2e12669a 2005void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2006 int len, int is_write)
2007{
2008 int l, flags;
2009 target_ulong page;
53a5960a 2010 void * p;
13eb76e0
FB
2011
2012 while (len > 0) {
2013 page = addr & TARGET_PAGE_MASK;
2014 l = (page + TARGET_PAGE_SIZE) - addr;
2015 if (l > len)
2016 l = len;
2017 flags = page_get_flags(page);
2018 if (!(flags & PAGE_VALID))
2019 return;
2020 if (is_write) {
2021 if (!(flags & PAGE_WRITE))
2022 return;
53a5960a
PB
2023 p = lock_user(addr, len, 0);
2024 memcpy(p, buf, len);
2025 unlock_user(p, addr, len);
13eb76e0
FB
2026 } else {
2027 if (!(flags & PAGE_READ))
2028 return;
53a5960a
PB
2029 p = lock_user(addr, len, 1);
2030 memcpy(buf, p, len);
2031 unlock_user(p, addr, 0);
13eb76e0
FB
2032 }
2033 len -= l;
2034 buf += l;
2035 addr += l;
2036 }
2037}
8df1cd07 2038
13eb76e0 2039#else
2e12669a 2040void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2041 int len, int is_write)
2042{
2043 int l, io_index;
2044 uint8_t *ptr;
2045 uint32_t val;
2e12669a
FB
2046 target_phys_addr_t page;
2047 unsigned long pd;
92e873b9 2048 PhysPageDesc *p;
13eb76e0
FB
2049
2050 while (len > 0) {
2051 page = addr & TARGET_PAGE_MASK;
2052 l = (page + TARGET_PAGE_SIZE) - addr;
2053 if (l > len)
2054 l = len;
92e873b9 2055 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2056 if (!p) {
2057 pd = IO_MEM_UNASSIGNED;
2058 } else {
2059 pd = p->phys_offset;
2060 }
2061
2062 if (is_write) {
3a7d929e 2063 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2064 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2065 /* XXX: could force cpu_single_env to NULL to avoid
2066 potential bugs */
13eb76e0 2067 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2068 /* 32 bit write access */
c27004ec 2069 val = ldl_p(buf);
a4193c8a 2070 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2071 l = 4;
2072 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2073 /* 16 bit write access */
c27004ec 2074 val = lduw_p(buf);
a4193c8a 2075 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2076 l = 2;
2077 } else {
1c213d19 2078 /* 8 bit write access */
c27004ec 2079 val = ldub_p(buf);
a4193c8a 2080 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2081 l = 1;
2082 }
2083 } else {
b448f2f3
FB
2084 unsigned long addr1;
2085 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2086 /* RAM case */
b448f2f3 2087 ptr = phys_ram_base + addr1;
13eb76e0 2088 memcpy(ptr, buf, l);
3a7d929e
FB
2089 if (!cpu_physical_memory_is_dirty(addr1)) {
2090 /* invalidate code */
2091 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2092 /* set dirty bit */
f23db169
FB
2093 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2094 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2095 }
13eb76e0
FB
2096 }
2097 } else {
2a4188a3
FB
2098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2099 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2100 /* I/O case */
2101 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2102 if (l >= 4 && ((addr & 3) == 0)) {
2103 /* 32 bit read access */
a4193c8a 2104 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2105 stl_p(buf, val);
13eb76e0
FB
2106 l = 4;
2107 } else if (l >= 2 && ((addr & 1) == 0)) {
2108 /* 16 bit read access */
a4193c8a 2109 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2110 stw_p(buf, val);
13eb76e0
FB
2111 l = 2;
2112 } else {
1c213d19 2113 /* 8 bit read access */
a4193c8a 2114 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2115 stb_p(buf, val);
13eb76e0
FB
2116 l = 1;
2117 }
2118 } else {
2119 /* RAM case */
2120 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2121 (addr & ~TARGET_PAGE_MASK);
2122 memcpy(buf, ptr, l);
2123 }
2124 }
2125 len -= l;
2126 buf += l;
2127 addr += l;
2128 }
2129}
8df1cd07 2130
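/* Illustrative sketch: device emulation performs "DMA" through this slow
   path, usually via the cpu_physical_memory_read/write wrappers from
   cpu-all.h. The helper name and addresses are made up for illustration. */
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *data, int len)
{
    /* is_write = 1: copies into guest RAM or MMIO, invalidates TBs that
       cover the written pages and updates the dirty bitmap */
    cpu_physical_memory_rw(dma_addr, (uint8_t *)data, len, 1);
}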
d0ecd2aa
FB
2131/* used for ROM loading: can write in RAM and ROM */
2132void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2133 const uint8_t *buf, int len)
2134{
2135 int l;
2136 uint8_t *ptr;
2137 target_phys_addr_t page;
2138 unsigned long pd;
2139 PhysPageDesc *p;
2140
2141 while (len > 0) {
2142 page = addr & TARGET_PAGE_MASK;
2143 l = (page + TARGET_PAGE_SIZE) - addr;
2144 if (l > len)
2145 l = len;
2146 p = phys_page_find(page >> TARGET_PAGE_BITS);
2147 if (!p) {
2148 pd = IO_MEM_UNASSIGNED;
2149 } else {
2150 pd = p->phys_offset;
2151 }
2152
2153 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2154 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2155 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2156 /* do nothing */
2157 } else {
2158 unsigned long addr1;
2159 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2160 /* ROM/RAM case */
2161 ptr = phys_ram_base + addr1;
2162 memcpy(ptr, buf, l);
2163 }
2164 len -= l;
2165 buf += l;
2166 addr += l;
2167 }
2168}
2169
2170
8df1cd07
FB
2171/* warning: addr must be aligned */
2172uint32_t ldl_phys(target_phys_addr_t addr)
2173{
2174 int io_index;
2175 uint8_t *ptr;
2176 uint32_t val;
2177 unsigned long pd;
2178 PhysPageDesc *p;
2179
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2181 if (!p) {
2182 pd = IO_MEM_UNASSIGNED;
2183 } else {
2184 pd = p->phys_offset;
2185 }
2186
2a4188a3
FB
2187 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2188 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2189 /* I/O case */
2190 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2191 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2192 } else {
2193 /* RAM case */
2194 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2195 (addr & ~TARGET_PAGE_MASK);
2196 val = ldl_p(ptr);
2197 }
2198 return val;
2199}
2200
84b7b8e7
FB
2201/* warning: addr must be aligned */
2202uint64_t ldq_phys(target_phys_addr_t addr)
2203{
2204 int io_index;
2205 uint8_t *ptr;
2206 uint64_t val;
2207 unsigned long pd;
2208 PhysPageDesc *p;
2209
2210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2211 if (!p) {
2212 pd = IO_MEM_UNASSIGNED;
2213 } else {
2214 pd = p->phys_offset;
2215 }
2216
2a4188a3
FB
2217 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2218 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2219 /* I/O case */
2220 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2221#ifdef TARGET_WORDS_BIGENDIAN
2222 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2223 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2224#else
2225 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2226 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2227#endif
2228 } else {
2229 /* RAM case */
2230 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2231 (addr & ~TARGET_PAGE_MASK);
2232 val = ldq_p(ptr);
2233 }
2234 return val;
2235}
2236
aab33094
FB
2237/* XXX: optimize */
2238uint32_t ldub_phys(target_phys_addr_t addr)
2239{
2240 uint8_t val;
2241 cpu_physical_memory_read(addr, &val, 1);
2242 return val;
2243}
2244
2245/* XXX: optimize */
2246uint32_t lduw_phys(target_phys_addr_t addr)
2247{
2248 uint16_t val;
2249 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2250 return tswap16(val);
2251}
2252
8df1cd07
FB
2253/* warning: addr must be aligned. The ram page is not marked as dirty
2254 and the code inside is not invalidated. It is useful if the dirty
2255 bits are used to track modified PTEs */
2256void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2257{
2258 int io_index;
2259 uint8_t *ptr;
2260 unsigned long pd;
2261 PhysPageDesc *p;
2262
2263 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2264 if (!p) {
2265 pd = IO_MEM_UNASSIGNED;
2266 } else {
2267 pd = p->phys_offset;
2268 }
2269
3a7d929e 2270 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2272 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2273 } else {
2274 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2275 (addr & ~TARGET_PAGE_MASK);
2276 stl_p(ptr, val);
2277 }
2278}
2279
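/* Illustrative sketch of the PTE-tracking use case mentioned above: a
   target MMU fault handler can set an accessed bit in a guest PTE without
   touching the dirty bitmap the target uses to track modified PTEs. The
   helper name is hypothetical; 0x20 is the x86 PG_ACCESSED_MASK, shown
   inline purely for illustration. */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* accessed */);
}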
2280/* warning: addr must be aligned */
8df1cd07
FB
2281void stl_phys(target_phys_addr_t addr, uint32_t val)
2282{
2283 int io_index;
2284 uint8_t *ptr;
2285 unsigned long pd;
2286 PhysPageDesc *p;
2287
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2289 if (!p) {
2290 pd = IO_MEM_UNASSIGNED;
2291 } else {
2292 pd = p->phys_offset;
2293 }
2294
3a7d929e 2295 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2298 } else {
2299 unsigned long addr1;
2300 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2301 /* RAM case */
2302 ptr = phys_ram_base + addr1;
2303 stl_p(ptr, val);
3a7d929e
FB
2304 if (!cpu_physical_memory_is_dirty(addr1)) {
2305 /* invalidate code */
2306 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2307 /* set dirty bit */
f23db169
FB
2308 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2309 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2310 }
8df1cd07
FB
2311 }
2312}
2313
aab33094
FB
2314/* XXX: optimize */
2315void stb_phys(target_phys_addr_t addr, uint32_t val)
2316{
2317 uint8_t v = val;
2318 cpu_physical_memory_write(addr, &v, 1);
2319}
2320
2321/* XXX: optimize */
2322void stw_phys(target_phys_addr_t addr, uint32_t val)
2323{
2324 uint16_t v = tswap16(val);
2325 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2326}
2327
2328/* XXX: optimize */
2329void stq_phys(target_phys_addr_t addr, uint64_t val)
2330{
2331 val = tswap64(val);
2332 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2333}
2334
13eb76e0
FB
2335#endif
2336
2337/* virtual memory access for debug */
b448f2f3
FB
2338int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2339 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2340{
2341 int l;
2342 target_ulong page, phys_addr;
2343
2344 while (len > 0) {
2345 page = addr & TARGET_PAGE_MASK;
2346 phys_addr = cpu_get_phys_page_debug(env, page);
2347 /* if no physical page mapped, return an error */
2348 if (phys_addr == -1)
2349 return -1;
2350 l = (page + TARGET_PAGE_SIZE) - addr;
2351 if (l > len)
2352 l = len;
b448f2f3
FB
2353 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2354 buf, l, is_write);
13eb76e0
FB
2355 len -= l;
2356 buf += l;
2357 addr += l;
2358 }
2359 return 0;
2360}
2361
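/* Illustrative sketch: a debugger front end such as the gdb stub reads
   guest virtual memory through the accessor above; it resolves addresses
   page by page via cpu_get_phys_page_debug() and never faults the guest.
   The wrapper name below is hypothetical. */
static int example_debug_read_mem(CPUState *env, target_ulong vaddr,
                                  uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}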
e3db7226
FB
2362void dump_exec_info(FILE *f,
2363 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2364{
2365 int i, target_code_size, max_target_code_size;
2366 int direct_jmp_count, direct_jmp2_count, cross_page;
2367 TranslationBlock *tb;
2368
2369 target_code_size = 0;
2370 max_target_code_size = 0;
2371 cross_page = 0;
2372 direct_jmp_count = 0;
2373 direct_jmp2_count = 0;
2374 for(i = 0; i < nb_tbs; i++) {
2375 tb = &tbs[i];
2376 target_code_size += tb->size;
2377 if (tb->size > max_target_code_size)
2378 max_target_code_size = tb->size;
2379 if (tb->page_addr[1] != -1)
2380 cross_page++;
2381 if (tb->tb_next_offset[0] != 0xffff) {
2382 direct_jmp_count++;
2383 if (tb->tb_next_offset[1] != 0xffff) {
2384 direct_jmp2_count++;
2385 }
2386 }
2387 }
2388 /* XXX: avoid using doubles ? */
2389 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2390 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2391 nb_tbs ? target_code_size / nb_tbs : 0,
2392 max_target_code_size);
2393 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2394 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2395 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2396 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2397 cross_page,
2398 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2399 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2400 direct_jmp_count,
2401 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2402 direct_jmp2_count,
2403 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2404 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2405 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2406 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2407}
2408
61382a50
FB
2409#if !defined(CONFIG_USER_ONLY)
2410
2411#define MMUSUFFIX _cmmu
2412#define GETPC() NULL
2413#define env cpu_single_env
b769d8fe 2414#define SOFTMMU_CODE_ACCESS
61382a50
FB
2415
2416#define SHIFT 0
2417#include "softmmu_template.h"
2418
2419#define SHIFT 1
2420#include "softmmu_template.h"
2421
2422#define SHIFT 2
2423#include "softmmu_template.h"
2424
2425#define SHIFT 3
2426#include "softmmu_template.h"
2427
2428#undef env
2429
2430#endif