/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* Note: the VirtPage handling is obsolete and will be suppressed
   ASAP */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

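/* Illustrative note (not in the original source): with TARGET_PAGE_BITS = 12
   and L2_BITS = 10, a 32-bit address splits 10/10/12. For example, for
   address 0x12345678:
       page index = 0x12345678 >> 12        = 0x12345
       L1 slot    = 0x12345 >> L2_BITS      = 0x48
       L2 slot    = 0x12345 & (L2_SIZE - 1) = 0x345
   page_find_alloc() below applies exactly this split. */
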
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
#if TARGET_LONG_BITS > 32
#define VIRT_L_BITS 9
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
static void *l1_virt_map[VIRT_L_SIZE];
#else
static VirtPageDesc *l1_virt_map[L1_SIZE];
#endif
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

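/* Illustrative usage sketch (not in the original source), assuming 'addr'
   is a target virtual address held by the caller:

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (p && p->first_tb) {
           ... translated code exists in this page ...
       }

   page_find() never allocates, so it is cheap on read-only paths;
   page_find_alloc() is for writers that may need to create the L2 table. */
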
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

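/* Illustrative note (not in the original source): as stated at the
   PhysPageDesc definition, phys_offset packs two fields. A sketch of the
   decode, assuming the usual TARGET_PAGE_MASK:

       io_index   = phys_offset & ~TARGET_PAGE_MASK;   (low bits)
       ram_offset = phys_offset & TARGET_PAGE_MASK;    (page-aligned part)

   tlb_set_page() below relies on this split to tell RAM, ROM and I/O
   pages apart. */
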
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);

static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
{
#if TARGET_LONG_BITS > 32
    void **p, **lp;

    p = l1_virt_map;
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
        *lp = p;
    }
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
#else
    VirtPageDesc *p, **lp;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
#endif
}

static inline VirtPageDesc *virt_page_find(target_ulong index)
{
    return virt_page_find_alloc(index, 0);
}

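/* Illustrative note (not in the original source): for TARGET_LONG_BITS > 32
   the walk above is a six-level radix tree with VIRT_L_BITS = 9 bits per
   level (five indexed lookups plus the final VirtPageDesc index), covering
   6 * 9 = 54 bits of page index, enough for a 64-bit virtual address
   shifted by TARGET_PAGE_BITS. Levels are allocated lazily, so sparse
   address spaces stay cheap. */
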
#if TARGET_LONG_BITS > 32
static void virt_page_flush_internal(void **p, int level)
{
    int i;
    if (level == 0) {
        VirtPageDesc *q = (VirtPageDesc *)p;
        for(i = 0; i < VIRT_L_SIZE; i++)
            q[i].valid_tag = 0;
    } else {
        level--;
        for(i = 0; i < VIRT_L_SIZE; i++) {
            if (p[i])
                virt_page_flush_internal(p[i], level);
        }
    }
}
#endif

static void virt_page_flush(void)
{
    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
#if TARGET_LONG_BITS > 32
        virt_page_flush_internal(l1_virt_map, 5);
#else
        {
            int i, j;
            VirtPageDesc *p;
            for(i = 0; i < L1_SIZE; i++) {
                p = l1_virt_map[i];
                if (p) {
                    for(j = 0; j < L2_SIZE; j++)
                        p[j].valid_tag = 0;
                }
            }
        }
#endif
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

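/* Illustrative note (not in the original source): virt_page_flush() uses a
   generation counter. Bumping virt_valid_tag invalidates every VirtPageDesc
   in O(1); the tables are only walked and cleared on the rare wrap-around
   to 0, where a stale entry could otherwise alias a fresh tag. */
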
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

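/* Illustrative note (not in the original source): the masking with ~3 in
   the list walks above works because page_next[] and jmp_first/jmp_next[]
   store a tag in the two low pointer bits. Tag 0 or 1 says which of the
   TB's two pages (or jump slots) the link belongs to; tag 2 marks the list
   head. This assumes TranslationBlock is at least 4-byte aligned. */
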
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

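/* Illustrative worked example (not in the original source):
   set_bits(tab, 5, 7) marks bits 5..11. The first byte is or'ed with
   0xff << 5 = 0xe0 (bits 5-7), start rounds up to 8 and end1 = 12 & ~7 = 8,
   so no full 0xff byte is written, and the tail byte is or'ed with
   ~(0xff << (12 & 7)) = 0x0f (bits 8-11). */
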
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

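/* Illustrative note (not in the original source): the bitmap test above
   checks 'len' bits in one shot. E.g. for a 4-byte write at page offset
   0x123: offset >> 3 = 0x24 selects the bitmap byte, >> (0x123 & 7) aligns
   bit 0 with the write start, and (1 << 4) - 1 = 0xf masks the bits
   covering the written bytes; if none is set, no TB overlaps the write and
   the expensive range invalidation is skipped. */
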
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

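/* Illustrative usage sketch (not in the original source): callers must
   handle tb_alloc() returning NULL and retry after a flush, exactly as
   tb_gen_code() does above:

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);      // frees the whole code buffer
           tb = tb_alloc(pc);  // cannot fail now
       }
*/
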
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

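/* Illustrative note (not in the original source): the binary search works
   because TBs are carved sequentially out of code_gen_buffer, so tbs[] is
   sorted by tc_ptr. When tc_ptr points inside a block rather than at its
   start, the loop exits with tbs[m_max] being the last TB starting at or
   below tc_ptr, i.e. the block containing it. */
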
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

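/* Illustrative note (not in the original source): chained TBs can loop
   entirely inside generated code, so setting interrupt_request alone might
   never be noticed. tb_reset_jump_recursive() breaks the jump chains of the
   currently executing TB, forcing control back to the main execution loop,
   which then services the request. */
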
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

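/* Illustrative usage sketch (not in the original source), mirroring how a
   "-d" style command line option could be parsed:

       int mask = cpu_str_to_log_mask("in_asm,op,exec");
       if (!mask) {
           ... unknown log item: print the cpu_log_items help strings ...
       }
       cpu_set_log(mask);
*/
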
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

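/* Illustrative note (not in the original source): retagging a write TLB
   entry as IO_MEM_NOTDIRTY makes subsequent guest stores to the page take
   the slow I/O path instead of a direct RAM write; that slow path is what
   invokes tb_invalidate_phys_page_fast(), so writes to pages holding
   translated code are always caught. */
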
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    env = cpu_single_env;
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

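/* Illustrative note (not in the original source): the softmmu TLB filled
   above is a direct-mapped cache indexed by (vaddr >> TARGET_PAGE_BITS)
   modulo CPU_TLB_SIZE. On a hit, .addend converts a guest virtual address
   into a host pointer with a single addition:

       host_ptr = (uint8_t *)(guest_vaddr + tlb_entry->addend);
*/
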
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

9fa3e853
FB
1817/* dump memory mappings */
1818void page_dump(FILE *f)
33417e70 1819{
9fa3e853
FB
1820 unsigned long start, end;
1821 int i, j, prot, prot1;
1822 PageDesc *p;
33417e70 1823
9fa3e853
FB
1824 fprintf(f, "%-8s %-8s %-8s %s\n",
1825 "start", "end", "size", "prot");
1826 start = -1;
1827 end = -1;
1828 prot = 0;
1829 for(i = 0; i <= L1_SIZE; i++) {
1830 if (i < L1_SIZE)
1831 p = l1_map[i];
1832 else
1833 p = NULL;
1834 for(j = 0;j < L2_SIZE; j++) {
1835 if (!p)
1836 prot1 = 0;
1837 else
1838 prot1 = p[j].flags;
1839 if (prot1 != prot) {
1840 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1841 if (start != -1) {
1842 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1843 start, end, end - start,
1844 prot & PAGE_READ ? 'r' : '-',
1845 prot & PAGE_WRITE ? 'w' : '-',
1846 prot & PAGE_EXEC ? 'x' : '-');
1847 }
1848 if (prot1 != 0)
1849 start = end;
1850 else
1851 start = -1;
1852 prot = prot1;
1853 }
1854 if (!p)
1855 break;
1856 }
33417e70 1857 }
33417e70 1858}
1859
9fa3e853 1860int page_get_flags(unsigned long address)
33417e70 1861{
9fa3e853 1862 PageDesc *p;
1863
1864 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1865 if (!p)
9fa3e853 1866 return 0;
1867 return p->flags;
1868}
1869
1870/* modify the flags of a page and invalidate the code if
1871 necessary. The flag PAGE_WRITE_ORG is set automatically
1872 depending on PAGE_WRITE */
1873void page_set_flags(unsigned long start, unsigned long end, int flags)
1874{
1875 PageDesc *p;
1876 unsigned long addr;
1877
1878 start = start & TARGET_PAGE_MASK;
1879 end = TARGET_PAGE_ALIGN(end);
1880 if (flags & PAGE_WRITE)
1881 flags |= PAGE_WRITE_ORG;
1882 spin_lock(&tb_lock);
1883 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1884 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1885 /* if the write protection is set, then we invalidate the code
1886 inside */
1887 if (!(p->flags & PAGE_WRITE) &&
1888 (flags & PAGE_WRITE) &&
1889 p->first_tb) {
d720b93d 1890 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853 1891 }
1892 p->flags = flags;
1893 }
1894 spin_unlock(&tb_lock);
33417e70 1895}
1896
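/* Illustrative sketch (not from the original source): a user-mode
   mprotect() emulation path using page_set_flags(); the helper name
   and flag choices are invented for illustration. */
#if 0 /* illustrative only */
static void example_target_mprotect(unsigned long start, unsigned long len,
                                    int writable)
{
    int flags = PAGE_VALID | PAGE_READ;
    if (writable)
        flags |= PAGE_WRITE; /* PAGE_WRITE_ORG is added automatically */
    /* write-enabling a page that holds translated code triggers
       tb_invalidate_phys_page() inside page_set_flags() */
    page_set_flags(start, start + len, flags);
}
#endif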
9fa3e853 1897/* called from signal handler: invalidate the code and unprotect the
1898 page. Return TRUE if the fault was successfully handled. */
d720b93d 1899int page_unprotect(unsigned long address, unsigned long pc, void *puc)
9fa3e853 1900{
1901 unsigned int page_index, prot, pindex;
1902 PageDesc *p, *p1;
1903 unsigned long host_start, host_end, addr;
1904
83fb7adf 1905 host_start = address & qemu_host_page_mask;
9fa3e853 1906 page_index = host_start >> TARGET_PAGE_BITS;
1907 p1 = page_find(page_index);
1908 if (!p1)
1909 return 0;
83fb7adf 1910 host_end = host_start + qemu_host_page_size;
9fa3e853 1911 p = p1;
1912 prot = 0;
1913 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1914 prot |= p->flags;
1915 p++;
1916 }
1917 /* if the page was really writable, then we change its
1918 protection back to writable */
1919 if (prot & PAGE_WRITE_ORG) {
1920 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1921 if (!(p1[pindex].flags & PAGE_WRITE)) {
83fb7adf 1922 mprotect((void *)host_start, qemu_host_page_size,
9fa3e853 1923 (prot & PAGE_BITS) | PAGE_WRITE);
1924 p1[pindex].flags |= PAGE_WRITE;
1925 /* and since the content will be modified, we must invalidate
1926 the corresponding translated code. */
d720b93d 1927 tb_invalidate_phys_page(address, pc, puc);
9fa3e853 1928#ifdef DEBUG_TB_CHECK
1929 tb_invalidate_check(address);
1930#endif
1931 return 1;
1932 }
1933 }
1934 return 0;
1935}
1936
1937/* call this function when system calls directly modify a memory area */
1938void page_unprotect_range(uint8_t *data, unsigned long data_size)
1939{
1940 unsigned long start, end, addr;
1941
1942 start = (unsigned long)data;
1943 end = start + data_size;
1944 start &= TARGET_PAGE_MASK;
1945 end = TARGET_PAGE_ALIGN(end);
1946 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1947 page_unprotect(addr, 0, NULL);
9fa3e853 1948 }
1949}
1950
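/* Illustrative sketch (not from the original source): the intended
   caller, assuming a user-mode read(2) emulation (the helper name is
   invented). Without the unprotect, the host kernel would fail with
   EFAULT when writing into a page that was write-protected to guard
   translated code. */
#if 0 /* illustrative only */
static long example_do_read(int fd, uint8_t *guest_buf, unsigned long count)
{
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
#endif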
1ccde1cb 1951static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1952{
1953}
9fa3e853 1954#endif /* defined(CONFIG_USER_ONLY) */
1955
33417e70 1956/* register physical memory. 'size' must be a multiple of the target
1957 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1958 io memory page */
2e12669a 1959void cpu_register_physical_memory(target_phys_addr_t start_addr,
1960 unsigned long size,
1961 unsigned long phys_offset)
33417e70 1962{
108c49b8 1963 target_phys_addr_t addr, end_addr;
92e873b9 1964 PhysPageDesc *p;
33417e70 1965
5fd386f6 1966 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1967 end_addr = start_addr + size;
5fd386f6 1968 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1969 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1970 p->phys_offset = phys_offset;
1971 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70 1972 phys_offset += TARGET_PAGE_SIZE;
1973 }
1974}
1975
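/* Illustrative sketch (not from the original source): a usage example
   for cpu_register_physical_memory(). The addresses and sizes are
   invented; a RAM page is marked by IO_MEM_RAM (zero low bits) and a
   ROM page by IO_MEM_ROM in the phys_offset. */
#if 0 /* illustrative only */
static void example_register_memory(void)
{
    /* 8MB of RAM at guest physical address 0, backed by the start of
       phys_ram_base */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 0x00000000 | IO_MEM_RAM);
    /* one ROM page directly after it */
    cpu_register_physical_memory(8 * 1024 * 1024, TARGET_PAGE_SIZE,
                                 (8 * 1024 * 1024) | IO_MEM_ROM);
}
#endif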
a4193c8a 1976static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1977{
1978 return 0;
1979}
1980
a4193c8a 1981static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1982{
1983}
1984
1985static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1986 unassigned_mem_readb,
1987 unassigned_mem_readb,
1988 unassigned_mem_readb,
1989};
1990
1991static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1992 unassigned_mem_writeb,
1993 unassigned_mem_writeb,
1994 unassigned_mem_writeb,
1995};
1996
3a7d929e 1997static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1998{
3a7d929e 1999 unsigned long ram_addr;
2000 int dirty_flags;
2001 ram_addr = addr - (unsigned long)phys_ram_base;
2002 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2003 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2004#if !defined(CONFIG_USER_ONLY)
3a7d929e 2005 tb_invalidate_phys_page_fast(ram_addr, 1);
2006 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2007#endif
3a7d929e 2008 }
c27004ec 2009 stb_p((uint8_t *)(long)addr, val);
f23db169 2010 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2011 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2012 /* we remove the notdirty callback only if the code has been
2013 flushed */
2014 if (dirty_flags == 0xff)
3a7d929e 2015 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
9fa3e853 2016}
2017
3a7d929e 2018static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2019{
3a7d929e 2020 unsigned long ram_addr;
2021 int dirty_flags;
2022 ram_addr = addr - (unsigned long)phys_ram_base;
2023 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2024 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2025#if !defined(CONFIG_USER_ONLY)
3a7d929e 2026 tb_invalidate_phys_page_fast(ram_addr, 2);
2027 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2028#endif
3a7d929e 2029 }
c27004ec 2030 stw_p((uint8_t *)(long)addr, val);
f23db169 2031 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2032 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2033 /* we remove the notdirty callback only if the code has been
2034 flushed */
2035 if (dirty_flags == 0xff)
3a7d929e 2036 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
9fa3e853 2037}
2038
3a7d929e 2039static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2040{
3a7d929e 2041 unsigned long ram_addr;
2042 int dirty_flags;
2043 ram_addr = addr - (unsigned long)phys_ram_base;
2044 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2045 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2046#if !defined(CONFIG_USER_ONLY)
3a7d929e 2047 tb_invalidate_phys_page_fast(ram_addr, 4);
2048 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2049#endif
3a7d929e 2050 }
c27004ec 2051 stl_p((uint8_t *)(long)addr, val);
f23db169 2052 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2053 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2054 /* we remove the notdirty callback only if the code has been
2055 flushed */
2056 if (dirty_flags == 0xff)
3a7d929e 2057 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
9fa3e853 2058}
2059
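/* Illustrative sketch (not from the original source): the dirty-byte
   convention the three writers above rely on — one byte per RAM page
   in phys_ram_dirty. The helper name and return codes are invented
   for illustration. */
#if 0 /* illustrative only */
static int example_page_state(unsigned long ram_addr)
{
    int flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (flags == 0xff)
        return 0; /* fully dirty: plain RAM writes, no callback needed */
    if (!(flags & CODE_DIRTY_FLAG))
        return 1; /* may hold translated code: writes must be trapped */
    return 2; /* clean only for another client (e.g. VGA dirty tracking) */
}
#endif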
3a7d929e 2060static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853 2061 NULL, /* never used */
2062 NULL, /* never used */
2063 NULL, /* never used */
2064};
2065
1ccde1cb 2066static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2067 notdirty_mem_writeb,
2068 notdirty_mem_writew,
2069 notdirty_mem_writel,
2070};
2071
33417e70 2072static void io_mem_init(void)
2073{
3a7d929e 2074 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2075 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2076 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb 2077 io_mem_nb = 5;
2078
2079 /* alloc dirty bits array */
0a962c02 2080 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2081 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70 2082}
2083
2084/* mem_read and mem_write are arrays of functions containing the
2085 function to access byte (index 0), word (index 1) and dword (index
2086 2). All functions must be supplied. If io_index is non-zero, the
2087 corresponding io zone is modified. If it is zero, a new io zone is
2088 allocated. The return value can be used with
2089 cpu_register_physical_memory(). (-1) is returned on error. */
2090int cpu_register_io_memory(int io_index,
2091 CPUReadMemoryFunc **mem_read,
a4193c8a 2092 CPUWriteMemoryFunc **mem_write,
2093 void *opaque)
33417e70 2094{
2095 int i;
2096
2097 if (io_index <= 0) {
2098 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2099 return -1;
2100 io_index = io_mem_nb++;
2101 } else {
2102 if (io_index >= IO_MEM_NB_ENTRIES)
2103 return -1;
2104 }
2105
2106 for(i = 0;i < 3; i++) {
2107 io_mem_read[io_index][i] = mem_read[i];
2108 io_mem_write[io_index][i] = mem_write[i];
2109 }
a4193c8a 2110 io_mem_opaque[io_index] = opaque;
33417e70 2111 return io_index << IO_MEM_SHIFT;
2112}
61382a50 2113
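/* Illustrative sketch (not from the original source): wiring a toy
   memory-mapped device into the I/O dispatch tables and mapping it at
   a guest physical address. The device, its address and its behaviour
   are invented for illustration. */
#if 0 /* illustrative only */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0x42; /* every register reads as 0x42 in this toy device */
}

static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* writes ignored */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb, /* 8/16/32 bit accesses */
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void example_register_mydev(void)
{
    int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    /* non-zero low bits in phys_offset mark the page as I/O */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io_index);
}
#endif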
8926b517 2114CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2115{
2116 return io_mem_write[io_index >> IO_MEM_SHIFT];
2117}
2118
2119CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2120{
2121 return io_mem_read[io_index >> IO_MEM_SHIFT];
2122}
2123
13eb76e0 2124/* physical memory access (slow version, mainly for debug) */
2125#if defined(CONFIG_USER_ONLY)
2e12669a 2126void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 2127 int len, int is_write)
2128{
2129 int l, flags;
2130 target_ulong page;
2131
2132 while (len > 0) {
2133 page = addr & TARGET_PAGE_MASK;
2134 l = (page + TARGET_PAGE_SIZE) - addr;
2135 if (l > len)
2136 l = len;
2137 flags = page_get_flags(page);
2138 if (!(flags & PAGE_VALID))
2139 return;
2140 if (is_write) {
2141 if (!(flags & PAGE_WRITE))
2142 return;
2143 memcpy((uint8_t *)addr, buf, l);
2144 } else {
2145 if (!(flags & PAGE_READ))
2146 return;
2147 memcpy(buf, (uint8_t *)addr, l);
2148 }
2149 len -= l;
2150 buf += l;
2151 addr += l;
2152 }
2153}
8df1cd07 2154
2155/* never used */
2156uint32_t ldl_phys(target_phys_addr_t addr)
2157{
2158 return 0;
2159}
2160
2161void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2162{
2163}
2164
2165void stl_phys(target_phys_addr_t addr, uint32_t val)
2166{
2167}
2168
13eb76e0 2169#else
2e12669a 2170void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 2171 int len, int is_write)
2172{
2173 int l, io_index;
2174 uint8_t *ptr;
2175 uint32_t val;
2e12669a 2176 target_phys_addr_t page;
2177 unsigned long pd;
92e873b9 2178 PhysPageDesc *p;
13eb76e0 2179
2180 while (len > 0) {
2181 page = addr & TARGET_PAGE_MASK;
2182 l = (page + TARGET_PAGE_SIZE) - addr;
2183 if (l > len)
2184 l = len;
92e873b9 2185 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0 2186 if (!p) {
2187 pd = IO_MEM_UNASSIGNED;
2188 } else {
2189 pd = p->phys_offset;
2190 }
2191
2192 if (is_write) {
3a7d929e 2193 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2194 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2195 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2196 /* 32 bit write access */
c27004ec 2197 val = ldl_p(buf);
a4193c8a 2198 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0 2199 l = 4;
2200 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2201 /* 16 bit write access */
c27004ec 2202 val = lduw_p(buf);
a4193c8a 2203 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0 2204 l = 2;
2205 } else {
1c213d19 2206 /* 8 bit write access */
c27004ec 2207 val = ldub_p(buf);
a4193c8a 2208 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0 2209 l = 1;
2210 }
2211 } else {
b448f2f3 2212 unsigned long addr1;
2213 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2214 /* RAM case */
b448f2f3 2215 ptr = phys_ram_base + addr1;
13eb76e0 2216 memcpy(ptr, buf, l);
3a7d929e 2217 if (!cpu_physical_memory_is_dirty(addr1)) {
2218 /* invalidate code */
2219 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2220 /* set dirty bit */
f23db169 2221 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2222 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2223 }
13eb76e0 2224 }
2225 } else {
3a7d929e 2226 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
13eb76e0 2227 /* I/O case */
2228 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2229 if (l >= 4 && ((addr & 3) == 0)) {
2230 /* 32 bit read access */
a4193c8a 2231 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2232 stl_p(buf, val);
13eb76e0 2233 l = 4;
2234 } else if (l >= 2 && ((addr & 1) == 0)) {
2235 /* 16 bit read access */
a4193c8a 2236 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2237 stw_p(buf, val);
13eb76e0 2238 l = 2;
2239 } else {
1c213d19 2240 /* 8 bit read access */
a4193c8a 2241 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2242 stb_p(buf, val);
13eb76e0 2243 l = 1;
2244 }
2245 } else {
2246 /* RAM case */
2247 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2248 (addr & ~TARGET_PAGE_MASK);
2249 memcpy(buf, ptr, l);
2250 }
2251 }
2252 len -= l;
2253 buf += l;
2254 addr += l;
2255 }
2256}
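/* Illustrative sketch (not from the original source): device models
   use cpu_physical_memory_rw() for DMA-style transfers into guest RAM
   (the wrapper name is invented). */
#if 0 /* illustrative only */
static void example_dma_write(target_phys_addr_t dst, const uint8_t *src,
                              int len)
{
    /* the cast is safe: buf is only read when is_write != 0 */
    cpu_physical_memory_rw(dst, (uint8_t *)src, len, 1);
}
#endif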
8df1cd07 2257
2258/* warning: addr must be aligned */
2259uint32_t ldl_phys(target_phys_addr_t addr)
2260{
2261 int io_index;
2262 uint8_t *ptr;
2263 uint32_t val;
2264 unsigned long pd;
2265 PhysPageDesc *p;
2266
2267 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2268 if (!p) {
2269 pd = IO_MEM_UNASSIGNED;
2270 } else {
2271 pd = p->phys_offset;
2272 }
2273
3a7d929e 2274 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
8df1cd07 2275 /* I/O case */
2276 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2277 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2278 } else {
2279 /* RAM case */
2280 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2281 (addr & ~TARGET_PAGE_MASK);
2282 val = ldl_p(ptr);
2283 }
2284 return val;
2285}
2286
2287/* warning: addr must be aligned. The ram page is not marked as dirty
2288 and the code inside is not invalidated. It is useful if the dirty
2289 bits are used to track modified PTEs */
2290void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2291{
2292 int io_index;
2293 uint8_t *ptr;
2294 unsigned long pd;
2295 PhysPageDesc *p;
2296
2297 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2298 if (!p) {
2299 pd = IO_MEM_UNASSIGNED;
2300 } else {
2301 pd = p->phys_offset;
2302 }
2303
3a7d929e 2304 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 2305 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2306 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2307 } else {
2308 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2309 (addr & ~TARGET_PAGE_MASK);
2310 stl_p(ptr, val);
2311 }
2312}
2313
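/* Illustrative sketch (not from the original source): the PTE use
   case mentioned above. The PTE layout and bit values are invented
   for illustration. */
#if 0 /* illustrative only */
static void example_set_pte_accessed_dirty(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20 | 0x40; /* hypothetical accessed and dirty bits */
    /* stl_phys_notdirty() leaves the dirty bitmap untouched, so the
       dirty bits keep tracking modifications made by the guest itself */
    stl_phys_notdirty(pte_addr, pte);
}
#endif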
2314/* warning: addr must be aligned */
8df1cd07 2315void stl_phys(target_phys_addr_t addr, uint32_t val)
2316{
2317 int io_index;
2318 uint8_t *ptr;
2319 unsigned long pd;
2320 PhysPageDesc *p;
2321
2322 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2323 if (!p) {
2324 pd = IO_MEM_UNASSIGNED;
2325 } else {
2326 pd = p->phys_offset;
2327 }
2328
3a7d929e 2329 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 2330 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2331 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2332 } else {
2333 unsigned long addr1;
2334 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2335 /* RAM case */
2336 ptr = phys_ram_base + addr1;
2337 stl_p(ptr, val);
3a7d929e 2338 if (!cpu_physical_memory_is_dirty(addr1)) {
2339 /* invalidate code */
2340 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2341 /* set dirty bit */
f23db169 2342 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2343 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2344 }
8df1cd07 2345 }
2346}
2347
13eb76e0 2348#endif
2349
2350/* virtual memory access for debug */
b448f2f3 2351int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2352 uint8_t *buf, int len, int is_write)
13eb76e0 2353{
2354 int l;
2355 target_ulong page, phys_addr;
2356
2357 while (len > 0) {
2358 page = addr & TARGET_PAGE_MASK;
2359 phys_addr = cpu_get_phys_page_debug(env, page);
2360 /* if no physical page mapped, return an error */
2361 if (phys_addr == -1)
2362 return -1;
2363 l = (page + TARGET_PAGE_SIZE) - addr;
2364 if (l > len)
2365 l = len;
b448f2f3 2366 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2367 buf, l, is_write);
13eb76e0 2368 len -= l;
2369 buf += l;
2370 addr += l;
2371 }
2372 return 0;
2373}
2374
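/* Illustrative sketch (not from the original source): the gdb stub
   reads and writes guest virtual memory through this routine, since
   it goes through the target MMU translation of the given CPU (the
   wrapper name is invented). */
#if 0 /* illustrative only */
static int example_gdb_read_mem(CPUState *env, target_ulong addr,
                                uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, addr, buf, len, 0);
}
#endif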
e3db7226 2375void dump_exec_info(FILE *f,
2376 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2377{
2378 int i, target_code_size, max_target_code_size;
2379 int direct_jmp_count, direct_jmp2_count, cross_page;
2380 TranslationBlock *tb;
2381
2382 target_code_size = 0;
2383 max_target_code_size = 0;
2384 cross_page = 0;
2385 direct_jmp_count = 0;
2386 direct_jmp2_count = 0;
2387 for(i = 0; i < nb_tbs; i++) {
2388 tb = &tbs[i];
2389 target_code_size += tb->size;
2390 if (tb->size > max_target_code_size)
2391 max_target_code_size = tb->size;
2392 if (tb->page_addr[1] != -1)
2393 cross_page++;
2394 if (tb->tb_next_offset[0] != 0xffff) {
2395 direct_jmp_count++;
2396 if (tb->tb_next_offset[1] != 0xffff) {
2397 direct_jmp2_count++;
2398 }
2399 }
2400 }
2401 /* XXX: avoid using doubles ? */
2402 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2403 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2404 nb_tbs ? target_code_size / nb_tbs : 0,
2405 max_target_code_size);
2406 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2407 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2408 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2409 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2410 cross_page,
2411 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2412 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2413 direct_jmp_count,
2414 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2415 direct_jmp2_count,
2416 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2417 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2418 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2419 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2420}
2421
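/* Illustrative sketch (not from the original source): fprintf()
   matches the cpu_fprintf callback signature, so the translator
   statistics can be dumped directly to a stream (the wrapper name is
   invented). */
#if 0 /* illustrative only */
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif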
61382a50 2422#if !defined(CONFIG_USER_ONLY)
2423
2424#define MMUSUFFIX _cmmu
2425#define GETPC() NULL
2426#define env cpu_single_env
b769d8fe 2427#define SOFTMMU_CODE_ACCESS
61382a50 2428
2429#define SHIFT 0
2430#include "softmmu_template.h"
2431
2432#define SHIFT 1
2433#include "softmmu_template.h"
2434
2435#define SHIFT 2
2436#include "softmmu_template.h"
2437
2438#define SHIFT 3
2439#include "softmmu_template.h"
2440
2441#undef env
2442
2443#endif