/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

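/* once a page containing code has seen this many write accesses, a
   bitmap of its code bytes is built so that later writes which miss
   the code can be handled without invalidating any TB (see
   build_page_bitmap) */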
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

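/* TBs are hashed twice: tb_hash is indexed by target virtual PC and
   is used to look up the next block to execute, while tb_phys_hash
   is indexed by physical address so that a TB can still be found and
   invalidated when the memory holding it is written, whatever
   virtual mapping is current */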
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

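/* the page tables below are two-level: the top L1_BITS of a target
   page index select an l1_map slot, the low L2_BITS select an entry
   in the L2 array that slot points to.  With 4 KB target pages
   (TARGET_PAGE_BITS = 12), L1_BITS is 10, so e.g. address 0x12345678
   has page index 0x12345 and resolves to entry 0x345 of
   l1_map[0x48]. */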
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

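/* 'virt_valid_tag' acts as a generation counter: bumping it
   invalidates every VirtPageDesc at once without touching them; only
   when the counter wraps around to 0 must all tags really be
   cleared */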
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));
    virt_page_flush();

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

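/* a TB that spans two pages is linked on both pages' lists; the low
   two bits of each 'page_next' pointer encode which of the TB's two
   pages (0 or 1) the link belongs to, hence the '& 3' / '& ~3'
   unpacking in the list walkers below */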
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

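/* the jump lists use the same low-bit tagging: a 'jmp_next' pointer
   carries the jump slot (0 or 1) it came from, and tag value 2 marks
   the owning TB itself, closing the circular 'jmp_first' list */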
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
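
/* e.g. set_bits(tab, 6, 4) marks bits 6..9: bits 6-7 of tab[0] via
   the entry mask, then bits 0-1 of tab[1] via the final mask */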

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
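
/* the binary search above is valid because tbs[] is filled in
   allocation order and code_gen_ptr only grows between flushes, so
   tc_ptr increases monotonically across the array */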

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
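
/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while any unknown name makes
   the whole call return 0 */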

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that writes to it are no longer trapped for dirty tracking
   (the page is marked dirty and the entry goes back to plain RAM) */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
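        /* the stored addend is host_page_address - vaddr: on a TLB
           hit, one addition turns the guest virtual address into a
           host pointer */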
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

33417e70
FB
1812/* register physical memory. 'size' must be a multiple of the target
1813 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1814 io memory page */
2e12669a
FB
1815void cpu_register_physical_memory(target_phys_addr_t start_addr,
1816 unsigned long size,
1817 unsigned long phys_offset)
33417e70
FB
1818{
1819 unsigned long addr, end_addr;
92e873b9 1820 PhysPageDesc *p;
33417e70 1821
5fd386f6 1822 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1823 end_addr = start_addr + size;
5fd386f6 1824 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
92e873b9 1825 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1826 p->phys_offset = phys_offset;
1827 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1828 phys_offset += TARGET_PAGE_SIZE;
1829 }
1830}
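
/* Usage sketch: typical board initialization registers RAM with a
   zero-based offset (plain RAM) and maps a BIOS image as ROM. The
   function name, addresses, sizes and bios_offset (an offset previously
   allocated inside phys_ram_base) are assumptions for the example. */
#if 0
static void example_board_init(int ram_size, unsigned long bios_offset)
{
    /* RAM at physical address 0, identity-mapped into phys_ram_base */
    cpu_register_physical_memory(0, ram_size, 0);
    /* a 64KB BIOS mapped read-only at the top of the 32 bit space */
    cpu_register_physical_memory(0xffff0000, 0x10000,
                                 bios_offset | IO_MEM_ROM);
}
#endif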

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writes to a page
   containing code are routed through these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    /* entries 0 to 4 (RAM, ROM, unassigned, code, notdirty) are
       reserved; dynamically allocated io zones start at index 5 */
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
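
/* Usage sketch: a device model registers its byte/word/long handlers,
   then maps the returned handle at a physical address. MyDevState, the
   mydev_* handlers and the address are placeholders, not code from this
   file. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_init(MyDevState *s)
{
    int io;
    /* io_index == 0 requests a freshly allocated io zone */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif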

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the chunk inside the page whose flags were
               checked, not the whole remaining length */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit (indexed by ram offset, not by the
                   guest physical address) */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
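
/* Usage sketch: a DMA-capable device model copies a completed transfer
   into guest RAM with a single call; the write path above takes care of
   code invalidation and dirty bits. dma_complete() is a made-up name. */
#if 0
static void dma_complete(target_phys_addr_t dst, uint8_t *data, int len)
{
    cpu_physical_memory_rw(dst, data, len, 1 /* is_write */);
}
#endif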

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
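
/* Usage sketch: a debugger stub reads guest virtual memory by letting
   cpu_memory_rw_debug() do the virtual-to-physical translation page by
   page. gdbstub_read_mem() is a made-up wrapper name. */
#if 0
static int gdbstub_read_mem(CPUState *env, target_ulong addr,
                            uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, addr, buf, len, 0 /* is_write */);
}
#endif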

#if !defined(CONFIG_USER_ONLY)

/* instantiate the softmmu templates for code accesses (suffix _cmmu);
   each SHIFT value generates the accessor for 1 << SHIFT bytes */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif