/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

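/* Illustration (added commentary, assuming TARGET_PAGE_BITS == 12 on
   a 32-bit target): the page tables below are two-level arrays
   indexed by page number, split as

       index = addr >> TARGET_PAGE_BITS;   20 significant bits
       l1    = index >> L2_BITS;           top 10 bits -> l1_*_map[]
       l2    = index & (L2_SIZE - 1);      low 10 bits -> slot in L2 block

   so each L2 block covers 1024 target pages (4 MB of address space)
   and is only allocated on first use. */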
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
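/* Note (added commentary): code_gen_buffer is a plain static array,
   so on hosts that enforce non-executable data pages it must be made
   writable and executable before generated code can run; that is what
   the VirtualProtect()/mprotect() calls above do, after rounding the
   buffer out to host page boundaries. */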

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
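/* Commentary (added): 'virt_valid_tag' is a generation counter, so a
   flush normally costs a single increment; an entry is live only
   while entry.valid_tag == virt_valid_tag. Only when the counter
   wraps to 0 must every entry be cleared explicitly, so that stale
   entries from a previous generation cannot appear valid again. */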

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

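/* Commentary (added) on the tagged pointers used above and below: the
   low 2 bits of a TranslationBlock pointer stored in 'first_tb',
   'page_next' and the jump lists encode which slot of that TB the
   chain continues through. A TB spanning two pages is linked once as
   (long)tb | 0 (follow page_next[0]) and once as (long)tb | 1 (follow
   page_next[1]); in the circular jump lists the value 2 marks the
   list head. Masking with ~3 recovers the real pointer:

       n1  = (long)tb1 & 3;                          slot index or end marker
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   actual TB pointer
*/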
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

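/* Worked example (added commentary): set_bits(tab, 3, 7) marks bits
   3..9 (end = 10). The run crosses a byte boundary, so the else
   branch runs: byte 0 is OR-ed with 0xff << 3 = 0xf8 (bits 3-7), no
   full 0xff bytes follow (end1 == 8), and byte 1 is OR-ed with
   ~(0xff << 2) = 0x03 (bits 8-9). In the code bitmap built below,
   bit k stands for byte offset k within the page. */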

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

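/* Commentary (added): the bitmap test above is the fast path for
   writes to a code page. For an aligned write of 'len' bytes at byte
   offset 'offset', bits offset..offset+len-1 of code_bitmap say
   whether any translated code covers those bytes; only then is the
   expensive range invalidation taken. E.g. a 4-byte store at offset
   0x124 tests (code_bitmap[0x24] >> 4) & 0x0f. */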
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
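/* Commentary (added): the binary search relies on tbs[] being sorted
   by tc_ptr, which holds because code_gen_ptr only grows between
   flushes and TBs are allocated in order. When tc_ptr falls inside a
   block rather than on its first host instruction, the loop exits
   with tbs[m_max] being the last TB whose tc_ptr is <= the target,
   i.e. the block containing the host PC. */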

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
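/* Usage sketch (added commentary): this is the sort of parsing a '-d'
   command line option would do, e.g.

       int mask = cpu_str_to_log_mask("in_asm,op");
       if (!mask)
           ... report the unknown log item ...
       else
           cpu_set_log(mask);   enables CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP
*/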

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no
   longer tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr: the page is marked dirty and writes to it no longer need to
   be trapped */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
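/* Commentary on the 'addend' trick (added): conceptually, the softmmu
   fast path turns a hit in this TLB into a single add,
   host_addr = guest_vaddr + addend. For a RAM page the value computed
   above is

       (phys_ram_base + (pd & TARGET_PAGE_MASK)) - vaddr

   with vaddr page aligned, so adding it to any guest address inside
   the page yields the matching pointer into phys_ram_base[]. For I/O
   pages the io_index kept in the low bits of 'address' routes the
   access through io_mem_read/io_mem_write instead. */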

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

33417e70
FB
1816/* register physical memory. 'size' must be a multiple of the target
1817 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1818 io memory page */
2e12669a
FB
1819void cpu_register_physical_memory(target_phys_addr_t start_addr,
1820 unsigned long size,
1821 unsigned long phys_offset)
33417e70
FB
1822{
1823 unsigned long addr, end_addr;
92e873b9 1824 PhysPageDesc *p;
33417e70 1825
5fd386f6 1826 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1827 end_addr = start_addr + size;
5fd386f6 1828 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
92e873b9 1829 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1830 p->phys_offset = phys_offset;
1831 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1832 phys_offset += TARGET_PAGE_SIZE;
1833 }
1834}
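
/* Illustrative sketch, not compiled in: how a machine init function
   might lay out guest physical memory with this API. The sizes,
   addresses and the example_ name are hypothetical. */
#if 0
static void example_machine_init(void)
{
    /* 16MB of RAM at guest physical address 0, backed by the start
       of phys_ram_base (IO_MEM_RAM means plain RAM) */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 0x00000000 | IO_MEM_RAM);
    /* 64KB of ROM: backed by the RAM pages just after the 16MB, but
       the low bits mark it read-only for the core loop */
    cpu_register_physical_memory(0xfffe0000, 0x10000,
                                 (16 * 1024 * 1024) | IO_MEM_ROM);
}
#endif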

/* unassigned memory reads as zero and ignores writes */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

/* the not-dirty handlers store to RAM, then re-enable the fast TLB
   path for the written address */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
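
/* Illustrative sketch, not compiled in: the point of the dirty map is
   that devices such as a frame buffer can redraw only the pages
   written since the last scan. The example_ function and the fb
   layout are hypothetical. */
#if 0
static void example_refresh_display(unsigned long fb_ram_offset,
                                    unsigned long fb_size)
{
    unsigned long a;

    for(a = fb_ram_offset; a < fb_ram_offset + fb_size;
        a += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[a >> TARGET_PAGE_BITS]) {
            phys_ram_dirty[a >> TARGET_PAGE_BITS] = 0;
            /* redraw the scanlines backed by this page */
        }
    }
}
#endif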

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    /* entries 0 (RAM) to 4 (NOTDIRTY) are now reserved */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. Otherwise, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new zone: check the table, not the caller index */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
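
/* Illustrative sketch, not compiled in: registering the MMIO handlers
   of a device and mapping them into the guest physical address space.
   ExampleDevState, the example_ names and the 0xf0000000 address are
   all hypothetical. */
#if 0
typedef struct ExampleDevState {
    uint32_t reg;
} ExampleDevState;

static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->reg & 0xff;
}

static void example_dev_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDevState *s = opaque;
    s->reg = val;
}

/* all three sizes must be supplied; a simple device can reuse the
   byte handler for word and dword accesses */
static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb, example_dev_readb, example_dev_readb,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb, example_dev_writeb, example_dev_writeb,
};

static void example_dev_init(ExampleDevState *s)
{
    int io;

    /* io_index 0 requests a freshly allocated io zone */
    io = cpu_register_io_memory(0, example_dev_read, example_dev_write, s);
    /* map one page of MMIO; the non-zero low bits of 'io' mark it as
       an io memory page for cpu_register_physical_memory() */
    cpu_register_physical_memory(0xf0000000, 0x1000, io);
}
#endif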

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most up to the end of the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit (index by ram offset, not guest address) */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
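
/* Illustrative sketch, not compiled in: a device model doing DMA
   should go through cpu_physical_memory_rw() so that I/O regions,
   code invalidation and dirty bits are all handled. The example_
   name is hypothetical. */
#if 0
static void example_dma_write_to_guest(target_phys_addr_t dest,
                                       const uint8_t *src, int len)
{
    /* is_write = 1: copies into guest memory, invalidating any
       translated code in the written range */
    cpu_physical_memory_rw(dest, (uint8_t *)src, len, 1);
}
#endif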

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
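
/* Illustrative sketch, not compiled in: this is the kind of call the
   gdb stub makes to read guest virtual memory; the MMU walk happens
   page by page via cpu_get_phys_page_debug(). The example_ wrapper
   is hypothetical. */
#if 0
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    /* returns -1 as soon as an unmapped page is hit */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif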

#if !defined(CONFIG_USER_ONLY)

/* instantiate the code-access soft MMU helpers for each access size */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif