/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
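
/* Encoding example: for RAM pages the low bits of phys_offset are
   IO_MEM_RAM and the rest is the page's offset into phys_ram_base;
   for I/O pages they hold an io_mem handler index instead -- see how
   tlb_set_page() below tests 'pd & ~TARGET_PAGE_MASK' against
   IO_MEM_ROM to tell the two cases apart. */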

/* Note: the VirtPage handling is obsolete and will be suppressed
   ASAP */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
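
/* Both page tables are two-level: the top L1_BITS of a page index
   select an l1_map/l1_phys_map slot and the low L2_BITS index into a
   lazily allocated array of L2_SIZE descriptors, e.g. page_find()
   computes l1_map[index >> L2_BITS] then 'index & (L2_SIZE - 1)'. */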

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
#if TARGET_LONG_BITS > 32
#define VIRT_L_BITS 9
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
static void *l1_virt_map[VIRT_L_SIZE];
#else
static VirtPageDesc *l1_virt_map[L1_SIZE];
#endif
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
{
#if TARGET_LONG_BITS > 32
    void **p, **lp;

    p = l1_virt_map;
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
        *lp = p;
    }
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
#else
    VirtPageDesc *p, **lp;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
#endif
}

static inline VirtPageDesc *virt_page_find(target_ulong index)
{
    return virt_page_find_alloc(index, 0);
}

#if TARGET_LONG_BITS > 32
static void virt_page_flush_internal(void **p, int level)
{
    int i;
    if (level == 0) {
        VirtPageDesc *q = (VirtPageDesc *)p;
        for(i = 0; i < VIRT_L_SIZE; i++)
            q[i].valid_tag = 0;
    } else {
        level--;
        for(i = 0; i < VIRT_L_SIZE; i++) {
            if (p[i])
                virt_page_flush_internal(p[i], level);
        }
    }
}
#endif

static void virt_page_flush(void)
{
    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
#if TARGET_LONG_BITS > 32
        virt_page_flush_internal(l1_virt_map, 5);
#else
        {
            int i, j;
            VirtPageDesc *p;
            for(i = 0; i < L1_SIZE; i++) {
                p = l1_virt_map[i];
                if (p) {
                    for(j = 0; j < L2_SIZE; j++)
                        p[j].valid_tag = 0;
                }
            }
        }
#endif
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
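
/* Note that tb_remove() is generic over the chain being walked:
   next_offset is the byte offset of the relevant 'next' field inside
   TranslationBlock (passed with offsetof), so one helper serves any
   singly linked TB list, e.g. the phys_hash_next chain below. */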

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
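
/* The page and jump lists store tagged pointers: the low 2 bits of
   each link record which page slot (0 or 1) of the pointed-to TB the
   link lives in, and the value 2 marks the end of a jmp_first circular
   list; masking with ~3 recovers the real TranslationBlock pointer. */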

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
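
/* Worked example: set_bits(tab, 3, 6) sets bits 3..8, i.e. tab[0] |=
   0xf8 (bits 3-7) and tab[1] |= 0x01 (bit 8). The first branch covers
   runs confined to a single byte; the second writes a leading partial
   byte, then whole 0xff bytes, then a trailing partial byte. */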

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
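
/* The resulting bitmap has one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes in total). tb_invalidate_phys_page_fast()
   consults it so that a small write which misses every translated
   range can skip the full tb_invalidate_phys_page_range() scan. */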

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
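
/* The binary search works because tbs[] is filled in allocation order
   and code_gen_ptr only grows between flushes, so tc_ptr values are
   sorted: the loop returns the last TB whose tc_ptr is <= the given
   host code address, i.e. the block containing that address. */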

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
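
/* A TLB entry's 'address' field packs the page-aligned virtual address
   with status bits below TARGET_PAGE_MASK: an io_mem index such as
   IO_MEM_CODE or IO_MEM_NOTDIRTY diverts the access to a handler (see
   the 'address = vaddr | pd' assignments in tlb_set_page() below). */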

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
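
/* Dirty tracking round trip: cpu_physical_memory_reset_dirty() above
   retags TLB write entries as IO_MEM_NOTDIRTY so that the next store
   to the page takes a slow path; tlb_set_dirty() is the inverse step,
   marking the page dirty again and restoring the fast IO_MEM_RAM
   mapping for subsequent writes. */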

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
1791
0124311e
FB
1792#else
1793
ee8b7021 1794void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1795{
1796}
1797
2e12669a 1798void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1799{
1800}
1801
2e12669a
FB
1802int tlb_set_page(CPUState *env, target_ulong vaddr,
1803 target_phys_addr_t paddr, int prot,
9fa3e853
FB
1804 int is_user, int is_softmmu)
1805{
1806 return 0;
1807}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
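
/* A hedged usage sketch (illustrative only, not part of this file):
   user-mode mmap emulation is the typical caller. 'start' and 'len'
   are hypothetical. Because PAGE_WRITE is passed, PAGE_WRITE_ORG is
   set automatically, so a later write protection of the range can be
   undone from the fault path by page_unprotect(). */
#if 0
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
#endif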

/* called from a signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when a system call directly modifies a memory
   area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
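
/* Hedged sketch (illustrative only): a syscall emulation path that
   lets the host kernel write directly into guest memory, such as a
   read(2), should unprotect the destination range first so the host
   write cannot fault on a page we write-protected to track translated
   code. 'fd', 'host_buf' and 'count' are hypothetical. */
#if 0
    page_unprotect_range(host_buf, count);
    if (read(fd, host_buf, count) < 0) {
        /* ... handle the error ... */
    }
#endif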

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   I/O memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
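
/* Hedged usage sketch (illustrative only): board initialization code
   typically maps RAM first, then device pages. 'DEV_BASE' and 'dev_io'
   are hypothetical; 'dev_io' stands for a value returned by
   cpu_register_io_memory(), whose non-zero low bits mark the page as
   I/O rather than RAM. */
#if 0
    /* 8 MB of RAM at guest physical address 0 (offset 0 in phys_ram_base) */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, 0);
    /* one page of memory-mapped device registers */
    cpu_register_physical_memory(DEV_BASE, TARGET_PAGE_SIZE, dev_io);
#endif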

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self-modifying code support in soft MMU mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc the dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}
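
/* Hedged sketch (illustrative only): each target page of RAM owns one
   byte in phys_ram_dirty. A consumer such as a display refresh loop
   can poll it via cpu_physical_memory_is_dirty(); 'page_addr' is a
   hypothetical offset into physical RAM. Resetting dirty state should
   go through the dirty-tracking helpers rather than the raw array, so
   the corresponding TLB entries are re-armed to trap further writes. */
#if 0
    if (cpu_physical_memory_is_dirty(page_addr)) {
        /* the page was written since it was last reset: re-sync it */
    }
#endif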

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
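
/* Hedged usage sketch (illustrative only): a minimal memory-mapped
   device with a single 32-bit register, wired up with the two
   registration calls above. All mydev_* names and MYDEV_BASE are
   hypothetical; the callback signatures match CPUReadMemoryFunc and
   CPUWriteMemoryFunc. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *reg = opaque;
    return *reg;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    uint32_t *reg = opaque;
    *reg = val;
}

/* the same handler serves the byte, word and dword slots here, which
   is acceptable for a sketch */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static uint32_t mydev_reg;

static void mydev_init(void)
{
    int io;
    /* io_index 0 requests a freshly allocated I/O zone */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, &mydev_reg);
    cpu_register_physical_memory(MYDEV_BASE, TARGET_PAGE_SIZE, io);
}
#endif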

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only up to the end of the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
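
/* Hedged usage sketch (illustrative only): device models doing DMA
   into guest RAM should funnel writes through this function so that
   the dirty byte is set and any translated code in the target pages
   is invalidated. 'dma_addr' and the packet buffer are hypothetical. */
#if 0
    uint8_t pkt[1514];
    /* ... fill pkt with received data ... */
    cpu_physical_memory_rw(dma_addr, pkt, sizeof(pkt), 1 /* is_write */);
#endif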

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside it is not invalidated. This is useful if the
   dirty bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
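
/* Hedged sketch (illustrative only): a target MMU helper that sets the
   accessed/dirty bits of a guest page table entry can use
   stl_phys_notdirty() so the RAM page holding the PTE is not itself
   flagged dirty and no code invalidation is triggered. 'pte_addr' and
   'PTE_ACCESSED' are hypothetical. */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
#endif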

/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
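
/* Hedged usage sketch (illustrative only): a debugger stub reads guest
   virtual memory through this entry point; the address translation is
   done with cpu_get_phys_page_debug(), so no TLB or fault state is
   disturbed. 'env' and 'vaddr' are hypothetical. */
#if 0
    uint32_t insn;
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&insn, 4, 0) < 0) {
        /* no physical page is mapped at vaddr */
    }
#endif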

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif