/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START   0x00000000
#define MMAP_AREA_END     0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

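/* NOTE: the l1_map/l1_phys_map/l1_virt_map tables below are two level
   structures: the high bits of a page index select an L1 slot which
   points to a lazily allocated array of L2_SIZE descriptors. With a
   4 KB target page (TARGET_PAGE_BITS = 12), a 32 bit address splits
   as 10 L1 bits + 10 L2 bits + 12 page offset bits. */
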
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

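/* NOTE: the mprotect()/VirtualProtect() calls in page_init() make the
   statically allocated code_gen_buffer executable: translated host
   code is written into it and then jumped to, so every host page
   spanning the buffer must be readable, writable and executable. */
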
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

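/* NOTE: 'index' in the find functions above is a page number
   (address >> TARGET_PAGE_BITS): 'index >> L2_BITS' selects the L1
   slot and 'index & (L2_SIZE - 1)' the descriptor inside the L2
   array. The _alloc variants create the L2 array on demand; the
   plain variants return 0 if it does not exist yet. */
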
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    /* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
    index &= (L1_SIZE - 1);
#endif
    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

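/* NOTE: virt_page_flush() invalidates every VirtPageDesc in O(1) by
   incrementing the generation counter 'virt_valid_tag': an entry is
   only considered live while its valid_tag matches the counter. The
   slow clearing loop runs only when the 32 bit counter wraps, so
   that stale entries cannot alias a reused tag value. */
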
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));
    virt_page_flush();

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

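/* NOTE: in the list manipulation below, pointers to TBs carry a 2 bit
   tag in their low bits (TranslationBlock is at least 4 byte
   aligned): tag 0 or 1 selects which of the TB's two page_next/
   jmp_next slots the link lives in, and tag 2 marks the end of the
   circular jump list (the TB itself, cf 'jmp_first'). */
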
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

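/* NOTE: set_bits() marks bits [start, start + len) in a bitmap kept
   one bit per page byte (hence the TARGET_PAGE_SIZE / 8 allocation
   above). Example: set_bits(tab, 5, 7) covers bits 5..11, i.e. it
   ORs tab[0] with 0xe0 (0xff << 5) and tab[1] with 0x0f
   (~(0xff << (12 & 7))). */
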
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

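/* Self modifying code handling in short: a store into a page that
   contains translated code invalidates every intersecting TB. With
   TARGET_HAS_PRECISE_SMC, if the store came from the TB currently
   executing, the CPU state is first rolled back to the faulting
   instruction, then a single instruction TB (CF_SINGLE_INSN) is
   regenerated so the store can complete without destroying the code
   it is itself running from. */
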
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

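/* NOTE: this is the fast path for small aligned writes: once a page
   has been written SMC_BITMAP_USE_THRESHOLD times, the per page
   bitmap of code bytes built above lets a write that touches no
   translated byte skip the full invalidation scan. */
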
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

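/* NOTE: the binary search in tb_find_pc() relies on tbs[] being
   sorted by tc_ptr, which holds because translated code is only ever
   appended to code_gen_buffer: tbs[i].tc_ptr grows with i until the
   whole buffer is flushed and both are reset together. */
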
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

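/* NOTE: inserting or removing a breakpoint invalidates the TB
   covering 'pc' (a one byte invalidation at its physical address),
   presumably so that the next translation of that code can take the
   breakpoint change into account. */
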
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

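/* NOTE: cpu_interrupt() cannot stop a TB that is already running;
   instead it unchains the direct jumps with tb_reset_jump_recursive()
   so that execution falls back to the main loop at the next block
   boundary, where interrupt_request is tested. */
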
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

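/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP. Note that the "all" branch falls
   through to 'found:' and ORs in the terminator's zero mask, which is
   harmless. */
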
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

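/* NOTE: the software TLB is a direct mapped cache of CPU_TLB_SIZE
   entries per direction (read/write) and privilege level (the
   'is_user' index), indexed by
   (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); flushing one page
   therefore only has to probe the four entries that could map it. */
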
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

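/* NOTE: write protecting a code page works by retagging the low bits
   of the TLB write entry with IO_MEM_CODE: later stores to the page
   take the slow I/O path (see code_mem_write* at the end of this
   file) where the invalidation hooks run, instead of the fast RAM
   path. */
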
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

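/* Dirty tracking in short: phys_ram_dirty[] keeps one byte per target
   page. cpu_physical_memory_reset_dirty() clears it and downgrades
   matching write TLB entries to IO_MEM_NOTDIRTY, so the next store
   takes the slow path, sets the dirty byte again via tlb_set_dirty()
   and promotes the entry back to the fast IO_MEM_RAM case. */
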
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

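/* NOTE: 'addend' above is precomputed as
   phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr, so a TLB hit turns
   a guest virtual address into a host pointer with a single addition
   (vaddr + addend); the low bits of 'address' encode the handler
   (IO_MEM_RAM, IO_MEM_CODE, IO_MEM_NOTDIRTY, ...). */
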
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

9fa3e853
FB
1770/* called from signal handler: invalidate the code and unprotect the
1771 page. Return TRUE if the fault was succesfully handled. */
d720b93d 1772int page_unprotect(unsigned long address, unsigned long pc, void *puc)
9fa3e853
FB
1773{
1774 unsigned int page_index, prot, pindex;
1775 PageDesc *p, *p1;
1776 unsigned long host_start, host_end, addr;
1777
83fb7adf 1778 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1779 page_index = host_start >> TARGET_PAGE_BITS;
1780 p1 = page_find(page_index);
1781 if (!p1)
1782 return 0;
83fb7adf 1783 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1784 p = p1;
1785 prot = 0;
1786 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1787 prot |= p->flags;
1788 p++;
1789 }
1790 /* if the page was really writable, then we change its
1791 protection back to writable */
1792 if (prot & PAGE_WRITE_ORG) {
1793 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1794 if (!(p1[pindex].flags & PAGE_WRITE)) {
83fb7adf 1795 mprotect((void *)host_start, qemu_host_page_size,
9fa3e853
FB
1796 (prot & PAGE_BITS) | PAGE_WRITE);
1797 p1[pindex].flags |= PAGE_WRITE;
1798 /* and since the content will be modified, we must invalidate
1799 the corresponding translated code. */
d720b93d 1800 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1801#ifdef DEBUG_TB_CHECK
1802 tb_invalidate_check(address);
1803#endif
1804 return 1;
1805 }
1806 }
1807 return 0;
1808}
1809
1810/* call this function when system calls directly modify a memory area */
1811void page_unprotect_range(uint8_t *data, unsigned long data_size)
1812{
1813 unsigned long start, end, addr;
1814
1815 start = (unsigned long)data;
1816 end = start + data_size;
1817 start &= TARGET_PAGE_MASK;
1818 end = TARGET_PAGE_ALIGN(end);
1819 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1820 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1821 }
1822}

/* no MMU/TLB in user-only mode: nothing to do */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' is rounded up to a multiple of the
   target page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then
   it is an io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
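
/* a minimal sketch (not from the original source) of how a machine
   init function could use the call above; 'ram_size', 'bios_offset'
   and 'mmio_index' are hypothetical, the latter obtained from
   cpu_register_io_memory() further down */
#if 0
static void example_machine_init(unsigned long ram_size,
                                 unsigned long bios_offset,
                                 int mmio_index)
{
    /* RAM at guest physical address 0 */
    cpu_register_physical_memory(0, ram_size, 0);
    /* 128KB of ROM: the low bits of phys_offset tag it as IO_MEM_ROM */
    cpu_register_physical_memory(0xfffe0000, 0x20000,
                                 bios_offset | IO_MEM_ROM);
    /* one page of device MMIO */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif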

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self-modifying code support in soft mmu mode: a write to a page
   containing code is routed to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

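/* dirty RAM tracking in soft mmu mode: a write to a RAM page not yet
   marked dirty lands here (via IO_MEM_NOTDIRTY); the store is
   performed, then tlb_set_dirty() updates the TLB entry so that
   subsequent writes to the page go straight to RAM */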
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
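    /* io indexes 0 to 4 are reserved for the fixed entries (RAM, ROM,
       UNASSIGNED, CODE, NOTDIRTY); RAM (index 0) needs no handlers,
       which is why only four registrations appear above */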
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero or negative, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone: check the table for free space */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
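
/* a minimal registration sketch (not from the original source): the
   'my_dev_*' handlers and 'dev_state' are hypothetical; passing
   io_index 0 asks for a dynamically allocated io zone */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void example_register_my_dev(void *dev_state)
{
    int io;

    io = cpu_register_io_memory(0, my_dev_read, my_dev_write, dev_state);
    if (io < 0)
        return; /* io table full */
    /* map one page of the device at a fixed physical address */
    cpu_register_physical_memory(0xfe001000, TARGET_PAGE_SIZE, io);
}
#endif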

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most one page per iteration */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
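
/* a minimal sketch (not from the original source): a DMA-capable
   device model can use the slow path to copy into guest physical
   memory; 'dma_addr', 'buf' and 'len' are hypothetical */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr,
                              uint8_t *buf, int len)
{
    /* is_write = 1: copy from buf to guest memory, invalidating any
       translated code in the target pages and setting dirty bits */
    cpu_physical_memory_rw(dma_addr, buf, len, 1);
}
#endif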

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
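
/* a minimal sketch (not from the original source): this is the kind
   of call a debugger stub makes to read guest virtual memory through
   the MMU context of 'env' */
#if 0
static int example_debug_read(CPUState *env, target_ulong addr,
                              uint8_t *buf, int len)
{
    /* is_write = 0; returns -1 if a page in the range is unmapped */
    return cpu_memory_rw_debug(env, addr, buf, len, 0);
}
#endif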

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

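/* each inclusion of softmmu_template.h instantiates the code-access
   memory helpers for one access size: SHIFT 0 = byte, 1 = 16 bit,
   2 = 32 bit, 3 = 64 bit */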
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif