/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

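/* l1_map, l1_phys_map and l1_virt_map are two-level tables: for a page
   index 'i' (an address shifted right by TARGET_PAGE_BITS), the upper
   bits select the L1 slot and the low L2_BITS index into a lazily
   allocated array of L2_SIZE descriptors, i.e.
   l1_map[i >> L2_BITS][i & (L2_SIZE - 1)]. With 4 KB target pages this
   is a 10/10 split of the 20-bit page number. */
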
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

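/* compute the host page size parameters and make code_gen_buffer
   executable. mprotect()/VirtualProtect() only work with host page
   granularity, hence the rounding of the buffer bounds to
   qemu_real_host_page_size below */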
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

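/* virt_page_flush() relies on a generation counter instead of clearing
   every VirtPageDesc: bumping virt_valid_tag invalidates all entries at
   once, and only when the counter wraps to zero must the stored tags
   really be reset */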
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

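/* TBs linked from a PageDesc carry their page index (0 or 1, for TBs
   spanning two pages) in the two low bits of the pointer; the jmp lists
   use the same encoding, with tag value 2 marking the list head. This
   is what the recurring '(long)tb & 3' / '& ~3' pairs below decode. */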
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

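/* set bits [start, start + len) in the byte-addressed bitmap 'tab':
   the partial head and tail bytes are masked, whole bytes in between
   are filled directly. For example, set_bits(tab, 3, 7) sets bits 3-7
   of tab[0] and bits 0-1 of tab[1]. */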
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

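/* fast path for small writes: if a code bitmap exists for the page,
   only writes that actually overlap translated code fall through to
   the full (and much slower) range invalidation */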
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

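/* note: tbs[] is sorted by tc_ptr because translated code is carved
   sequentially out of code_gen_buffer, which is what makes the binary
   search below valid */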
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

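/* chained TBs branch directly to each other without going through the
   main execution loop, so an interrupt can only be noticed once the
   jumps of the currently executing TB are unlinked; this is what the
   tb_reset_jump_recursive() call below does */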
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

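/* the software TLB is direct-mapped: the entry index is
   (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), with separate
   read/write arrays and separate [0]/[1] sets for kernel and user mode
   (the 'is_user' index in tlb_set_page), hence the four
   tlb_flush_entry() calls below */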
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

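/* dirty RAM tracking works by downgrading writable RAM TLB entries to
   IO_MEM_NOTDIRTY: the next write goes through the slow I/O path,
   which sets the page's byte in phys_ram_dirty and restores the fast
   mapping (see tlb_set_dirty() and the notdirty_mem_write* handlers
   at the end of this file) */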
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

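/* a TLB entry stores an 'addend' such that host address = guest
   virtual address + addend for fast RAM accesses; for I/O pages the
   io_index is kept in the low bits of 'address' instead, so the
   softmmu slow path can dispatch to the registered handlers */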
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

33417e70
FB
1822/* register physical memory. 'size' must be a multiple of the target
1823 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1824 io memory page */
2e12669a
FB
1825void cpu_register_physical_memory(target_phys_addr_t start_addr,
1826 unsigned long size,
1827 unsigned long phys_offset)
33417e70
FB
1828{
1829 unsigned long addr, end_addr;
92e873b9 1830 PhysPageDesc *p;
33417e70 1831
5fd386f6 1832 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1833 end_addr = start_addr + size;
5fd386f6 1834 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
92e873b9 1835 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1836 p->phys_offset = phys_offset;
1837 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1838 phys_offset += TARGET_PAGE_SIZE;
1839 }
1840}
1841
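/* Sketch of typical board setup (addresses and sizes are illustrative):
   8 MB of RAM at guest physical address 0, followed by one ROM page
   whose backing storage sits just above the RAM in phys_ram_base. */
static void board_init_memory(void)
{
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, 0);
    cpu_register_physical_memory(0x00800000, TARGET_PAGE_SIZE,
                                 (8 * 1024 * 1024) | IO_MEM_ROM);
}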
/* unassigned memory: reads return 0, writes are ignored */
a4193c8a 1842static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70
FB
1843{
1844 return 0;
1845}
1846
a4193c8a 1847static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70
FB
1848{
1849}
1850
1851static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1852 unassigned_mem_readb,
1853 unassigned_mem_readb,
1854 unassigned_mem_readb,
1855};
1856
1857static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1858 unassigned_mem_writeb,
1859 unassigned_mem_writeb,
1860 unassigned_mem_writeb,
1861};
1862
9fa3e853
FB
1863/* self-modifying code support in softmmu mode: writes to a page
1864   containing code are routed to these functions */
1865
a4193c8a 1866static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1867{
1ccde1cb
FB
1868 unsigned long phys_addr;
1869
274da6b2 1870 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1871#if !defined(CONFIG_USER_ONLY)
d720b93d 1872 tb_invalidate_phys_page_fast(phys_addr, 1);
9fa3e853 1873#endif
1ccde1cb
FB
1874 stb_raw((uint8_t *)addr, val);
1875 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1876}
1877
a4193c8a 1878static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1879{
1ccde1cb
FB
1880 unsigned long phys_addr;
1881
274da6b2 1882 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1883#if !defined(CONFIG_USER_ONLY)
d720b93d 1884 tb_invalidate_phys_page_fast(phys_addr, 2);
9fa3e853 1885#endif
1ccde1cb
FB
1886 stw_raw((uint8_t *)addr, val);
1887 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1888}
1889
a4193c8a 1890static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1891{
1ccde1cb
FB
1892 unsigned long phys_addr;
1893
274da6b2 1894 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1895#if !defined(CONFIG_USER_ONLY)
d720b93d 1896 tb_invalidate_phys_page_fast(phys_addr, 4);
9fa3e853 1897#endif
1ccde1cb
FB
1898 stl_raw((uint8_t *)addr, val);
1899 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1900}
1901
1902static CPUReadMemoryFunc *code_mem_read[3] = {
1903 NULL, /* never used */
1904 NULL, /* never used */
1905 NULL, /* never used */
1906};
1907
1908static CPUWriteMemoryFunc *code_mem_write[3] = {
1909 code_mem_writeb,
1910 code_mem_writew,
1911 code_mem_writel,
1912};
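/* Conceptual dispatch, condensed from the softmmu templates (the
   argument names are illustrative): a store whose TLB entry carries
   IO_MEM_CODE ends up in one of the handlers above. */
static inline void dispatch_code_store32(unsigned long tlb_addr,
                                         unsigned long addr, uint32_t val)
{
    int io_index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
}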
33417e70 1913
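/* Dirty-bit tracking: a RAM page whose dirty bit is clear is mapped
   through IO_MEM_NOTDIRTY, so its first store lands in one of the
   handlers below; after the store, tlb_set_dirty() lets later stores
   take the fast RAM path again. */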
a4193c8a 1914static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1915{
1916 stb_raw((uint8_t *)addr, val);
d720b93d 1917 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1918}
1919
a4193c8a 1920static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1921{
1922 stw_raw((uint8_t *)addr, val);
d720b93d 1923 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1924}
1925
a4193c8a 1926static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1927{
1928 stl_raw((uint8_t *)addr, val);
d720b93d 1929 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1930}
1931
1932static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1933 notdirty_mem_writeb,
1934 notdirty_mem_writew,
1935 notdirty_mem_writel,
1936};
1937
33417e70
FB
1938static void io_mem_init(void)
1939{
a4193c8a
FB
1940 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
1941 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1942 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
1943 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
1944    io_mem_nb = 5; /* the first entries are reserved for the built-in memory types registered above */
1945
1946 /* alloc dirty bits array */
59817ccb 1947 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1948}
1949
1950/* mem_read and mem_write are arrays of the three functions used to
1951   access a byte (index 0), word (index 1) and dword (index 2). All
1952   functions must be supplied. If io_index is non-zero, the
1953   corresponding io zone is modified. If it is zero, a new io zone is
1954   allocated. The return value can be used with
1955   cpu_register_physical_memory(); (-1) is returned on error. */
1956int cpu_register_io_memory(int io_index,
1957 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
1958 CPUWriteMemoryFunc **mem_write,
1959 void *opaque)
33417e70
FB
1960{
1961 int i;
1962
1963 if (io_index <= 0) {
1964        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1965 return -1;
1966 io_index = io_mem_nb++;
1967 } else {
1968 if (io_index >= IO_MEM_NB_ENTRIES)
1969 return -1;
1970 }
1971
1972    for(i = 0; i < 3; i++) {
1973 io_mem_read[io_index][i] = mem_read[i];
1974 io_mem_write[io_index][i] = mem_write[i];
1975 }
a4193c8a 1976 io_mem_opaque[io_index] = opaque;
33417e70
FB
1977 return io_index << IO_MEM_SHIFT;
1978}
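/* Sketch of a device registration (the mydev_* callbacks and opaque 's'
   are illustrative): passing io_index 0 allocates a fresh io zone, and
   the returned token is then attached to a guest physical range. */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *s)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}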
61382a50 1979
13eb76e0
FB
1980/* physical memory access (slow version, mainly for debug) */
1981#if defined(CONFIG_USER_ONLY)
2e12669a 1982void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1983 int len, int is_write)
1984{
1985 int l, flags;
1986 target_ulong page;
1987
1988 while (len > 0) {
1989 page = addr & TARGET_PAGE_MASK;
1990 l = (page + TARGET_PAGE_SIZE) - addr;
1991 if (l > len)
1992 l = len;
1993 flags = page_get_flags(page);
1994 if (!(flags & PAGE_VALID))
1995 return;
1996 if (is_write) {
1997 if (!(flags & PAGE_WRITE))
1998 return;
1999            memcpy((uint8_t *)addr, buf, l);
2000 } else {
2001 if (!(flags & PAGE_READ))
2002 return;
2003            memcpy(buf, (uint8_t *)addr, l);
2004 }
2005 len -= l;
2006 buf += l;
2007 addr += l;
2008 }
2009}
2010#else
2e12669a 2011void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2012 int len, int is_write)
2013{
2014 int l, io_index;
2015 uint8_t *ptr;
2016 uint32_t val;
2e12669a
FB
2017 target_phys_addr_t page;
2018 unsigned long pd;
92e873b9 2019 PhysPageDesc *p;
13eb76e0
FB
2020
2021 while (len > 0) {
2022 page = addr & TARGET_PAGE_MASK;
2023 l = (page + TARGET_PAGE_SIZE) - addr;
2024 if (l > len)
2025 l = len;
92e873b9 2026 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2027 if (!p) {
2028 pd = IO_MEM_UNASSIGNED;
2029 } else {
2030 pd = p->phys_offset;
2031 }
2032
2033 if (is_write) {
2034 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2035 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2036 if (l >= 4 && ((addr & 3) == 0)) {
2037                    /* 32 bit write access */
2038 val = ldl_raw(buf);
a4193c8a 2039 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2040 l = 4;
2041 } else if (l >= 2 && ((addr & 1) == 0)) {
2042                    /* 16 bit write access */
2043 val = lduw_raw(buf);
a4193c8a 2044 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2045 l = 2;
2046 } else {
2047                    /* 8 bit write access */
2048 val = ldub_raw(buf);
a4193c8a 2049 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2050 l = 1;
2051 }
2052 } else {
b448f2f3
FB
2053 unsigned long addr1;
2054 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2055 /* RAM case */
b448f2f3 2056 ptr = phys_ram_base + addr1;
13eb76e0 2057 memcpy(ptr, buf, l);
b448f2f3
FB
2058 /* invalidate code */
2059 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2060 /* set dirty bit */
2061                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
13eb76e0
FB
2062 }
2063 } else {
2064 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2065 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2066 /* I/O case */
2067 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2068 if (l >= 4 && ((addr & 3) == 0)) {
2069 /* 32 bit read access */
a4193c8a 2070 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
13eb76e0
FB
2071 stl_raw(buf, val);
2072 l = 4;
2073 } else if (l >= 2 && ((addr & 1) == 0)) {
2074 /* 16 bit read access */
a4193c8a 2075 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
13eb76e0
FB
2076 stw_raw(buf, val);
2077 l = 2;
2078 } else {
2079 /* 8 bit access */
a4193c8a 2080 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
13eb76e0
FB
2081 stb_raw(buf, val);
2082 l = 1;
2083 }
2084 } else {
2085 /* RAM case */
2086 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2087 (addr & ~TARGET_PAGE_MASK);
2088 memcpy(buf, ptr, l);
2089 }
2090 }
2091 len -= l;
2092 buf += l;
2093 addr += l;
2094 }
2095}
2096#endif
2097
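/* Sketch of a DMA-style caller (dma_write() and its arguments are
   illustrative): going through cpu_physical_memory_rw() keeps MMIO
   dispatch, code invalidation and the dirty bitmap consistent, unlike a
   raw memcpy into phys_ram_base. */
static void dma_write(target_phys_addr_t dest, const uint8_t *data, int len)
{
    cpu_physical_memory_rw(dest, (uint8_t *)data, len, 1);
}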
2098/* virtual memory access for debug */
b448f2f3
FB
2099int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2100 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2101{
2102 int l;
2103 target_ulong page, phys_addr;
2104
2105 while (len > 0) {
2106 page = addr & TARGET_PAGE_MASK;
2107 phys_addr = cpu_get_phys_page_debug(env, page);
2108 /* if no physical page mapped, return an error */
2109 if (phys_addr == -1)
2110 return -1;
2111 l = (page + TARGET_PAGE_SIZE) - addr;
2112 if (l > len)
2113 l = len;
b448f2f3
FB
2114 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2115 buf, l, is_write);
13eb76e0
FB
2116 len -= l;
2117 buf += l;
2118 addr += l;
2119 }
2120 return 0;
2121}
2122
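/* Sketch of a debugger-style caller (debug_read_mem() is illustrative):
   fetch guest-virtual memory, getting -1 back if any page in the range
   is unmapped. */
static int debug_read_mem(CPUState *env, target_ulong vaddr,
                          uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}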
61382a50
FB
2123#if !defined(CONFIG_USER_ONLY)
2124
2125#define MMUSUFFIX _cmmu
2126#define GETPC() NULL
2127#define env cpu_single_env
b769d8fe 2128#define SOFTMMU_CODE_ACCESS
61382a50
FB
2129
2130#define SHIFT 0
2131#include "softmmu_template.h"
2132
2133#define SHIFT 1
2134#include "softmmu_template.h"
2135
2136#define SHIFT 2
2137#include "softmmu_template.h"
2138
2139#define SHIFT 3
2140#include "softmmu_template.h"
2141
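/* Each inclusion of softmmu_template.h above instantiates the TLB-aware
   memory helpers for one access size (SHIFT 0..3 = 1, 2, 4 and 8 bytes).
   SOFTMMU_CODE_ACCESS selects the code-fetch variants (hence the _cmmu
   suffix), GETPC() is NULL because these accesses come from the
   translator itself rather than from generated code, so there is no host
   return address to map back to a guest PC on a fault, and env is
   aliased to cpu_single_env because no CPUState is threaded through. */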
2142#undef env
2143
2144#endif