[qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
43
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
48
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
52
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
55
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
60
61 /* threshold to flush the translated code buffer */
62 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
63
64 #define SMC_BITMAP_USE_THRESHOLD 10
65
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
68
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
86
87 TranslationBlock *tbs;
88 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
89 int nb_tbs;
90 /* any access to the tbs or the page table must use this lock */
91 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92
93 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
94 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
95 uint8_t *code_gen_ptr;
96
97 ram_addr_t phys_ram_size;
98 int phys_ram_fd;
99 uint8_t *phys_ram_base;
100 uint8_t *phys_ram_dirty;
101 static ram_addr_t phys_ram_alloc_offset = 0;
102
103 CPUState *first_cpu;
104 /* current CPU in the current thread. It is only valid inside
105 cpu_exec() */
106 CPUState *cpu_single_env;
107
108 typedef struct PageDesc {
109 /* list of TBs intersecting this ram page */
110 TranslationBlock *first_tb;
111 /* in order to optimize self-modifying code handling, we count the number
112 of write accesses to a given page and switch to a bitmap above a threshold */
113 unsigned int code_write_count;
114 uint8_t *code_bitmap;
115 #if defined(CONFIG_USER_ONLY)
116 unsigned long flags;
117 #endif
118 } PageDesc;
119
120 typedef struct PhysPageDesc {
121 /* offset in host memory of the page + io_index in the low 12 bits */
122 ram_addr_t phys_offset;
123 } PhysPageDesc;
124
125 #define L2_BITS 10
126 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
127 /* XXX: this is a temporary hack for alpha target.
128 * In the future, this is to be replaced by a multi-level table
129 * to actually be able to handle the complete 64-bit address space.
130 */
131 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
132 #else
133 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
134 #endif
135
136 #define L1_SIZE (1 << L1_BITS)
137 #define L2_SIZE (1 << L2_BITS)
138
139 static void io_mem_init(void);
140
141 unsigned long qemu_real_host_page_size;
142 unsigned long qemu_host_page_bits;
143 unsigned long qemu_host_page_size;
144 unsigned long qemu_host_page_mask;
145
146 /* XXX: for system emulation, it could just be an array */
147 static PageDesc *l1_map[L1_SIZE];
148 PhysPageDesc **l1_phys_map;
149
150 /* io memory support */
151 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
152 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
153 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
154 static int io_mem_nb;
155 #if defined(CONFIG_SOFTMMU)
156 static int io_mem_watch;
157 #endif
158
159 /* log support */
160 char *logfilename = "/tmp/qemu.log";
161 FILE *logfile;
162 int loglevel;
163 static int log_append = 0;
164
165 /* statistics */
166 static int tlb_flush_count;
167 static int tb_flush_count;
168 static int tb_phys_invalidate_count;
169
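/* A subpage_t describes a target page that is split between several
   memory regions: for each byte offset in the page and each access size
   index it stores the read/write handlers and their opaque pointers, so
   that accesses can be dispatched to the owning region. SUBPAGE_IDX()
   extracts the offset of an address inside its page. */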
170 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
171 typedef struct subpage_t {
172 target_phys_addr_t base;
173 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
174 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
175 void *opaque[TARGET_PAGE_SIZE][2][4];
176 } subpage_t;
177
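/* map_exec(): make the given host memory range executable
   (VirtualProtect on win32, mprotect elsewhere). It is used to allow
   execution of the generated code buffers. */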
178 #ifdef _WIN32
179 static void map_exec(void *addr, long size)
180 {
181 DWORD old_protect;
182 VirtualProtect(addr, size,
183 PAGE_EXECUTE_READWRITE, &old_protect);
184
185 }
186 #else
187 static void map_exec(void *addr, long size)
188 {
189 unsigned long start, end;
190
191 start = (unsigned long)addr;
192 start &= ~(qemu_real_host_page_size - 1);
193
194 end = (unsigned long)addr + size;
195 end += qemu_real_host_page_size - 1;
196 end &= ~(qemu_real_host_page_size - 1);
197
198 mprotect((void *)start, end - start,
199 PROT_READ | PROT_WRITE | PROT_EXEC);
200 }
201 #endif
202
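/* page_init(): determine the host page size, make the code generation
   buffers executable, derive qemu_host_page_size/bits/mask, allocate the
   first level of the physical page table and, for user mode emulation,
   mark every range already mapped in the host (from /proc/self/maps) as
   PAGE_RESERVED. */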
203 static void page_init(void)
204 {
205 /* NOTE: we can always assume that qemu_host_page_size >=
206 TARGET_PAGE_SIZE */
207 #ifdef _WIN32
208 {
209 SYSTEM_INFO system_info;
210 DWORD old_protect;
211
212 GetSystemInfo(&system_info);
213 qemu_real_host_page_size = system_info.dwPageSize;
214 }
215 #else
216 qemu_real_host_page_size = getpagesize();
217 #endif
218 map_exec(code_gen_buffer, sizeof(code_gen_buffer));
219 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
220
221 if (qemu_host_page_size == 0)
222 qemu_host_page_size = qemu_real_host_page_size;
223 if (qemu_host_page_size < TARGET_PAGE_SIZE)
224 qemu_host_page_size = TARGET_PAGE_SIZE;
225 qemu_host_page_bits = 0;
226 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
227 qemu_host_page_bits++;
228 qemu_host_page_mask = ~(qemu_host_page_size - 1);
229 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
230 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
231
232 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
233 {
234 long long startaddr, endaddr;
235 FILE *f;
236 int n;
237
238 f = fopen("/proc/self/maps", "r");
239 if (f) {
240 do {
241 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
242 if (n == 2) {
243 startaddr = MIN(startaddr,
244 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
245 endaddr = MIN(endaddr,
246 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
247 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
248 TARGET_PAGE_ALIGN(endaddr),
249 PAGE_RESERVED);
250 }
251 } while (!feof(f));
252 fclose(f);
253 }
254 }
255 #endif
256 }
257
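/* The PageDesc table is a two-level table: l1_map[] holds L1_SIZE
   pointers to second level arrays of L2_SIZE PageDesc entries, indexed
   by target page number. page_find_alloc() allocates the second level
   lazily; page_find() only looks it up. */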
258 static inline PageDesc *page_find_alloc(target_ulong index)
259 {
260 PageDesc **lp, *p;
261
262 lp = &l1_map[index >> L2_BITS];
263 p = *lp;
264 if (!p) {
265 /* allocate if not found */
266 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
267 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
268 *lp = p;
269 }
270 return p + (index & (L2_SIZE - 1));
271 }
272
273 static inline PageDesc *page_find(target_ulong index)
274 {
275 PageDesc *p;
276
277 p = l1_map[index >> L2_BITS];
278 if (!p)
279 return 0;
280 return p + (index & (L2_SIZE - 1));
281 }
282
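/* The physical page table has the same two-level layout, indexed by
   physical page number, with one extra indirection level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32. Leaf entries start out as
   IO_MEM_UNASSIGNED. */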
283 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
284 {
285 void **lp, **p;
286 PhysPageDesc *pd;
287
288 p = (void **)l1_phys_map;
289 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
290
291 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
292 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
293 #endif
294 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
295 p = *lp;
296 if (!p) {
297 /* allocate if not found */
298 if (!alloc)
299 return NULL;
300 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
301 memset(p, 0, sizeof(void *) * L1_SIZE);
302 *lp = p;
303 }
304 #endif
305 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
306 pd = *lp;
307 if (!pd) {
308 int i;
309 /* allocate if not found */
310 if (!alloc)
311 return NULL;
312 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
313 *lp = pd;
314 for (i = 0; i < L2_SIZE; i++)
315 pd[i].phys_offset = IO_MEM_UNASSIGNED;
316 }
317 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
318 }
319
320 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
321 {
322 return phys_page_find_alloc(index, 0);
323 }
324
325 #if !defined(CONFIG_USER_ONLY)
326 static void tlb_protect_code(ram_addr_t ram_addr);
327 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
328 target_ulong vaddr);
329 #endif
330
331 void cpu_exec_init(CPUState *env)
332 {
333 CPUState **penv;
334 int cpu_index;
335
336 if (!code_gen_ptr) {
337 cpu_gen_init();
338 tbs = qemu_malloc(CODE_GEN_MAX_BLOCKS * sizeof(TranslationBlock));
339 code_gen_ptr = code_gen_buffer;
340 page_init();
341 io_mem_init();
342 }
343 env->next_cpu = NULL;
344 penv = &first_cpu;
345 cpu_index = 0;
346 while (*penv != NULL) {
347 penv = (CPUState **)&(*penv)->next_cpu;
348 cpu_index++;
349 }
350 env->cpu_index = cpu_index;
351 env->nb_watchpoints = 0;
352 *penv = env;
353 }
354
355 static inline void invalidate_page_bitmap(PageDesc *p)
356 {
357 if (p->code_bitmap) {
358 qemu_free(p->code_bitmap);
359 p->code_bitmap = NULL;
360 }
361 p->code_write_count = 0;
362 }
363
364 /* set to NULL all the 'first_tb' fields in all PageDescs */
365 static void page_flush_tb(void)
366 {
367 int i, j;
368 PageDesc *p;
369
370 for(i = 0; i < L1_SIZE; i++) {
371 p = l1_map[i];
372 if (p) {
373 for(j = 0; j < L2_SIZE; j++) {
374 p->first_tb = NULL;
375 invalidate_page_bitmap(p);
376 p++;
377 }
378 }
379 }
380 }
381
382 /* flush all the translation blocks */
383 /* XXX: tb_flush is currently not thread safe */
384 void tb_flush(CPUState *env1)
385 {
386 CPUState *env;
387 #if defined(DEBUG_FLUSH)
388 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
389 (unsigned long)(code_gen_ptr - code_gen_buffer),
390 nb_tbs, nb_tbs > 0 ?
391 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
392 #endif
393 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
394 cpu_abort(env1, "Internal error: code buffer overflow\n");
395
396 nb_tbs = 0;
397
398 for(env = first_cpu; env != NULL; env = env->next_cpu) {
399 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
400 }
401
402 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
403 page_flush_tb();
404
405 code_gen_ptr = code_gen_buffer;
406 /* XXX: flush processor icache at this point if cache flush is
407 expensive */
408 tb_flush_count++;
409 }
410
411 #ifdef DEBUG_TB_CHECK
412
413 static void tb_invalidate_check(target_ulong address)
414 {
415 TranslationBlock *tb;
416 int i;
417 address &= TARGET_PAGE_MASK;
418 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
419 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
420 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
421 address >= tb->pc + tb->size)) {
422 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
423 address, (long)tb->pc, tb->size);
424 }
425 }
426 }
427 }
428
429 /* verify that all the pages have correct rights for code */
430 static void tb_page_check(void)
431 {
432 TranslationBlock *tb;
433 int i, flags1, flags2;
434
435 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
436 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
437 flags1 = page_get_flags(tb->pc);
438 flags2 = page_get_flags(tb->pc + tb->size - 1);
439 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
440 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
441 (long)tb->pc, tb->size, flags1, flags2);
442 }
443 }
444 }
445 }
446
447 void tb_jmp_check(TranslationBlock *tb)
448 {
449 TranslationBlock *tb1;
450 unsigned int n1;
451
452 /* suppress any remaining jumps to this TB */
453 tb1 = tb->jmp_first;
454 for(;;) {
455 n1 = (long)tb1 & 3;
456 tb1 = (TranslationBlock *)((long)tb1 & ~3);
457 if (n1 == 2)
458 break;
459 tb1 = tb1->jmp_next[n1];
460 }
461 /* check end of list */
462 if (tb1 != tb) {
463 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
464 }
465 }
466
467 #endif
468
469 /* invalidate one TB */
470 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
471 int next_offset)
472 {
473 TranslationBlock *tb1;
474 for(;;) {
475 tb1 = *ptb;
476 if (tb1 == tb) {
477 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
478 break;
479 }
480 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
481 }
482 }
483
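/* The per-page TB lists and the jump lists use tagged pointers: the low
   2 bits of each pointer select which slot of the pointed-to TB
   (page_next[0]/page_next[1] or jmp_next[0]/jmp_next[1]) continues the
   list, and the value 2 marks the head of the circular jump list. */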
484 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
485 {
486 TranslationBlock *tb1;
487 unsigned int n1;
488
489 for(;;) {
490 tb1 = *ptb;
491 n1 = (long)tb1 & 3;
492 tb1 = (TranslationBlock *)((long)tb1 & ~3);
493 if (tb1 == tb) {
494 *ptb = tb1->page_next[n1];
495 break;
496 }
497 ptb = &tb1->page_next[n1];
498 }
499 }
500
501 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
502 {
503 TranslationBlock *tb1, **ptb;
504 unsigned int n1;
505
506 ptb = &tb->jmp_next[n];
507 tb1 = *ptb;
508 if (tb1) {
509 /* find tb(n) in circular list */
510 for(;;) {
511 tb1 = *ptb;
512 n1 = (long)tb1 & 3;
513 tb1 = (TranslationBlock *)((long)tb1 & ~3);
514 if (n1 == n && tb1 == tb)
515 break;
516 if (n1 == 2) {
517 ptb = &tb1->jmp_first;
518 } else {
519 ptb = &tb1->jmp_next[n1];
520 }
521 }
522 /* now we can suppress tb(n) from the list */
523 *ptb = tb->jmp_next[n];
524
525 tb->jmp_next[n] = NULL;
526 }
527 }
528
529 /* reset the jump entry 'n' of a TB so that it is not chained to
530 another TB */
531 static inline void tb_reset_jump(TranslationBlock *tb, int n)
532 {
533 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
534 }
535
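/* tb_phys_invalidate(): remove a TB from every structure that can reach
   it: the physical hash table, the per-page TB lists, the per-CPU
   tb_jmp_cache and the jump lists. TBs that chained directly to it get
   their jump reset so that execution returns to the main loop instead of
   chaining to the invalidated code. */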
536 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
537 {
538 CPUState *env;
539 PageDesc *p;
540 unsigned int h, n1;
541 target_phys_addr_t phys_pc;
542 TranslationBlock *tb1, *tb2;
543
544 /* remove the TB from the hash list */
545 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
546 h = tb_phys_hash_func(phys_pc);
547 tb_remove(&tb_phys_hash[h], tb,
548 offsetof(TranslationBlock, phys_hash_next));
549
550 /* remove the TB from the page list */
551 if (tb->page_addr[0] != page_addr) {
552 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
553 tb_page_remove(&p->first_tb, tb);
554 invalidate_page_bitmap(p);
555 }
556 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
557 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
558 tb_page_remove(&p->first_tb, tb);
559 invalidate_page_bitmap(p);
560 }
561
562 tb_invalidated_flag = 1;
563
564 /* remove the TB from the hash list */
565 h = tb_jmp_cache_hash_func(tb->pc);
566 for(env = first_cpu; env != NULL; env = env->next_cpu) {
567 if (env->tb_jmp_cache[h] == tb)
568 env->tb_jmp_cache[h] = NULL;
569 }
570
571 /* suppress this TB from the two jump lists */
572 tb_jmp_remove(tb, 0);
573 tb_jmp_remove(tb, 1);
574
575 /* suppress any remaining jumps to this TB */
576 tb1 = tb->jmp_first;
577 for(;;) {
578 n1 = (long)tb1 & 3;
579 if (n1 == 2)
580 break;
581 tb1 = (TranslationBlock *)((long)tb1 & ~3);
582 tb2 = tb1->jmp_next[n1];
583 tb_reset_jump(tb1, n1);
584 tb1->jmp_next[n1] = NULL;
585 tb1 = tb2;
586 }
587 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
588
589 tb_phys_invalidate_count++;
590 }
591
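/* set_bits(): set 'len' consecutive bits starting at bit 'start' in the
   bitmap 'tab', handling a partial first byte, whole middle bytes and a
   partial last byte. For example set_bits(tab, 5, 7) sets bits 5..11. */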
592 static inline void set_bits(uint8_t *tab, int start, int len)
593 {
594 int end, mask, end1;
595
596 end = start + len;
597 tab += start >> 3;
598 mask = 0xff << (start & 7);
599 if ((start & ~7) == (end & ~7)) {
600 if (start < end) {
601 mask &= ~(0xff << (end & 7));
602 *tab |= mask;
603 }
604 } else {
605 *tab++ |= mask;
606 start = (start + 8) & ~7;
607 end1 = end & ~7;
608 while (start < end1) {
609 *tab++ = 0xff;
610 start += 8;
611 }
612 if (start < end) {
613 mask = ~(0xff << (end & 7));
614 *tab |= mask;
615 }
616 }
617 }
618
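/* build a bitmap with one bit per byte of the page, set for every byte
   covered by a TB. tb_invalidate_phys_page_fast() uses it to skip the
   costly invalidation when a write does not touch translated code. */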
619 static void build_page_bitmap(PageDesc *p)
620 {
621 int n, tb_start, tb_end;
622 TranslationBlock *tb;
623
624 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
625 if (!p->code_bitmap)
626 return;
627 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
628
629 tb = p->first_tb;
630 while (tb != NULL) {
631 n = (long)tb & 3;
632 tb = (TranslationBlock *)((long)tb & ~3);
633 /* NOTE: this is subtle as a TB may span two physical pages */
634 if (n == 0) {
635 /* NOTE: tb_end may be after the end of the page, but
636 it is not a problem */
637 tb_start = tb->pc & ~TARGET_PAGE_MASK;
638 tb_end = tb_start + tb->size;
639 if (tb_end > TARGET_PAGE_SIZE)
640 tb_end = TARGET_PAGE_SIZE;
641 } else {
642 tb_start = 0;
643 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
644 }
645 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
646 tb = tb->page_next[n];
647 }
648 }
649
650 #ifdef TARGET_HAS_PRECISE_SMC
651
652 static void tb_gen_code(CPUState *env,
653 target_ulong pc, target_ulong cs_base, int flags,
654 int cflags)
655 {
656 TranslationBlock *tb;
657 uint8_t *tc_ptr;
658 target_ulong phys_pc, phys_page2, virt_page2;
659 int code_gen_size;
660
661 phys_pc = get_phys_addr_code(env, pc);
662 tb = tb_alloc(pc);
663 if (!tb) {
664 /* flush must be done */
665 tb_flush(env);
666 /* cannot fail at this point */
667 tb = tb_alloc(pc);
668 }
669 tc_ptr = code_gen_ptr;
670 tb->tc_ptr = tc_ptr;
671 tb->cs_base = cs_base;
672 tb->flags = flags;
673 tb->cflags = cflags;
674 cpu_gen_code(env, tb, &code_gen_size);
675 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
676
677 /* check next page if needed */
678 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
679 phys_page2 = -1;
680 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
681 phys_page2 = get_phys_addr_code(env, virt_page2);
682 }
683 tb_link_phys(tb, phys_pc, phys_page2);
684 }
685 #endif
686
687 /* invalidate all TBs which intersect with the target physical page
688 starting in range [start, end[. NOTE: start and end must refer to
689 the same physical page. 'is_cpu_write_access' should be true if called
690 from a real cpu write access: the virtual CPU will exit the current
691 TB if code is modified inside this TB. */
692 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
693 int is_cpu_write_access)
694 {
695 int n, current_tb_modified, current_tb_not_found, current_flags;
696 CPUState *env = cpu_single_env;
697 PageDesc *p;
698 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
699 target_ulong tb_start, tb_end;
700 target_ulong current_pc, current_cs_base;
701
702 p = page_find(start >> TARGET_PAGE_BITS);
703 if (!p)
704 return;
705 if (!p->code_bitmap &&
706 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
707 is_cpu_write_access) {
708 /* build code bitmap */
709 build_page_bitmap(p);
710 }
711
712 /* we remove all the TBs in the range [start, end[ */
713 /* XXX: see if in some cases it could be faster to invalidate all the code */
714 current_tb_not_found = is_cpu_write_access;
715 current_tb_modified = 0;
716 current_tb = NULL; /* avoid warning */
717 current_pc = 0; /* avoid warning */
718 current_cs_base = 0; /* avoid warning */
719 current_flags = 0; /* avoid warning */
720 tb = p->first_tb;
721 while (tb != NULL) {
722 n = (long)tb & 3;
723 tb = (TranslationBlock *)((long)tb & ~3);
724 tb_next = tb->page_next[n];
725 /* NOTE: this is subtle as a TB may span two physical pages */
726 if (n == 0) {
727 /* NOTE: tb_end may be after the end of the page, but
728 it is not a problem */
729 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
730 tb_end = tb_start + tb->size;
731 } else {
732 tb_start = tb->page_addr[1];
733 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
734 }
735 if (!(tb_end <= start || tb_start >= end)) {
736 #ifdef TARGET_HAS_PRECISE_SMC
737 if (current_tb_not_found) {
738 current_tb_not_found = 0;
739 current_tb = NULL;
740 if (env->mem_write_pc) {
741 /* now we have a real cpu fault */
742 current_tb = tb_find_pc(env->mem_write_pc);
743 }
744 }
745 if (current_tb == tb &&
746 !(current_tb->cflags & CF_SINGLE_INSN)) {
747 /* If we are modifying the current TB, we must stop
748 its execution. We could be more precise by checking
749 that the modification is after the current PC, but it
750 would require a specialized function to partially
751 restore the CPU state */
752
753 current_tb_modified = 1;
754 cpu_restore_state(current_tb, env,
755 env->mem_write_pc, NULL);
756 #if defined(TARGET_I386)
757 current_flags = env->hflags;
758 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
759 current_cs_base = (target_ulong)env->segs[R_CS].base;
760 current_pc = current_cs_base + env->eip;
761 #else
762 #error unsupported CPU
763 #endif
764 }
765 #endif /* TARGET_HAS_PRECISE_SMC */
766 /* we need to do that to handle the case where a signal
767 occurs while doing tb_phys_invalidate() */
768 saved_tb = NULL;
769 if (env) {
770 saved_tb = env->current_tb;
771 env->current_tb = NULL;
772 }
773 tb_phys_invalidate(tb, -1);
774 if (env) {
775 env->current_tb = saved_tb;
776 if (env->interrupt_request && env->current_tb)
777 cpu_interrupt(env, env->interrupt_request);
778 }
779 }
780 tb = tb_next;
781 }
782 #if !defined(CONFIG_USER_ONLY)
783 /* if no code remaining, no need to continue to use slow writes */
784 if (!p->first_tb) {
785 invalidate_page_bitmap(p);
786 if (is_cpu_write_access) {
787 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
788 }
789 }
790 #endif
791 #ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb_modified) {
793 /* we generate a block containing just the instruction
794 modifying the memory. It will ensure that it cannot modify
795 itself */
796 env->current_tb = NULL;
797 tb_gen_code(env, current_pc, current_cs_base, current_flags,
798 CF_SINGLE_INSN);
799 cpu_resume_from_signal(env, NULL);
800 }
801 #endif
802 }
803
804 /* len must be <= 8 and start must be a multiple of len */
805 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
806 {
807 PageDesc *p;
808 int offset, b;
809 #if 0
810 if (1) {
811 if (loglevel) {
812 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
813 cpu_single_env->mem_write_vaddr, len,
814 cpu_single_env->eip,
815 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
816 }
817 }
818 #endif
819 p = page_find(start >> TARGET_PAGE_BITS);
820 if (!p)
821 return;
822 if (p->code_bitmap) {
823 offset = start & ~TARGET_PAGE_MASK;
824 b = p->code_bitmap[offset >> 3] >> (offset & 7);
825 if (b & ((1 << len) - 1))
826 goto do_invalidate;
827 } else {
828 do_invalidate:
829 tb_invalidate_phys_page_range(start, start + len, 1);
830 }
831 }
832
833 #if !defined(CONFIG_SOFTMMU)
834 static void tb_invalidate_phys_page(target_phys_addr_t addr,
835 unsigned long pc, void *puc)
836 {
837 int n, current_flags, current_tb_modified;
838 target_ulong current_pc, current_cs_base;
839 PageDesc *p;
840 TranslationBlock *tb, *current_tb;
841 #ifdef TARGET_HAS_PRECISE_SMC
842 CPUState *env = cpu_single_env;
843 #endif
844
845 addr &= TARGET_PAGE_MASK;
846 p = page_find(addr >> TARGET_PAGE_BITS);
847 if (!p)
848 return;
849 tb = p->first_tb;
850 current_tb_modified = 0;
851 current_tb = NULL;
852 current_pc = 0; /* avoid warning */
853 current_cs_base = 0; /* avoid warning */
854 current_flags = 0; /* avoid warning */
855 #ifdef TARGET_HAS_PRECISE_SMC
856 if (tb && pc != 0) {
857 current_tb = tb_find_pc(pc);
858 }
859 #endif
860 while (tb != NULL) {
861 n = (long)tb & 3;
862 tb = (TranslationBlock *)((long)tb & ~3);
863 #ifdef TARGET_HAS_PRECISE_SMC
864 if (current_tb == tb &&
865 !(current_tb->cflags & CF_SINGLE_INSN)) {
866 /* If we are modifying the current TB, we must stop
867 its execution. We could be more precise by checking
868 that the modification is after the current PC, but it
869 would require a specialized function to partially
870 restore the CPU state */
871
872 current_tb_modified = 1;
873 cpu_restore_state(current_tb, env, pc, puc);
874 #if defined(TARGET_I386)
875 current_flags = env->hflags;
876 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
877 current_cs_base = (target_ulong)env->segs[R_CS].base;
878 current_pc = current_cs_base + env->eip;
879 #else
880 #error unsupported CPU
881 #endif
882 }
883 #endif /* TARGET_HAS_PRECISE_SMC */
884 tb_phys_invalidate(tb, addr);
885 tb = tb->page_next[n];
886 }
887 p->first_tb = NULL;
888 #ifdef TARGET_HAS_PRECISE_SMC
889 if (current_tb_modified) {
890 /* we generate a block containing just the instruction
891 modifying the memory. It will ensure that it cannot modify
892 itself */
893 env->current_tb = NULL;
894 tb_gen_code(env, current_pc, current_cs_base, current_flags,
895 CF_SINGLE_INSN);
896 cpu_resume_from_signal(env, puc);
897 }
898 #endif
899 }
900 #endif
901
902 /* add the TB to the target page's TB list and protect the page if necessary */
903 static inline void tb_alloc_page(TranslationBlock *tb,
904 unsigned int n, target_ulong page_addr)
905 {
906 PageDesc *p;
907 TranslationBlock *last_first_tb;
908
909 tb->page_addr[n] = page_addr;
910 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
911 tb->page_next[n] = p->first_tb;
912 last_first_tb = p->first_tb;
913 p->first_tb = (TranslationBlock *)((long)tb | n);
914 invalidate_page_bitmap(p);
915
916 #if defined(TARGET_HAS_SMC) || 1
917
918 #if defined(CONFIG_USER_ONLY)
919 if (p->flags & PAGE_WRITE) {
920 target_ulong addr;
921 PageDesc *p2;
922 int prot;
923
924 /* force the host page as non writable (writes will have a
925 page fault + mprotect overhead) */
926 page_addr &= qemu_host_page_mask;
927 prot = 0;
928 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
929 addr += TARGET_PAGE_SIZE) {
930
931 p2 = page_find (addr >> TARGET_PAGE_BITS);
932 if (!p2)
933 continue;
934 prot |= p2->flags;
935 p2->flags &= ~PAGE_WRITE;
936 page_get_flags(addr);
937 }
938 mprotect(g2h(page_addr), qemu_host_page_size,
939 (prot & PAGE_BITS) & ~PAGE_WRITE);
940 #ifdef DEBUG_TB_INVALIDATE
941 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
942 page_addr);
943 #endif
944 }
945 #else
946 /* if some code is already present, then the pages are already
947 protected. So we handle the case where only the first TB is
948 allocated in a physical page */
949 if (!last_first_tb) {
950 tlb_protect_code(page_addr);
951 }
952 #endif
953
954 #endif /* TARGET_HAS_SMC */
955 }
956
957 /* Allocate a new translation block. Flush the translation buffer if
958 too many translation blocks or too much generated code. */
959 TranslationBlock *tb_alloc(target_ulong pc)
960 {
961 TranslationBlock *tb;
962
963 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
964 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
965 return NULL;
966 tb = &tbs[nb_tbs++];
967 tb->pc = pc;
968 tb->cflags = 0;
969 return tb;
970 }
971
972 /* add a new TB and link it to the physical page tables. phys_page2 is
973 (-1) to indicate that only one page contains the TB. */
974 void tb_link_phys(TranslationBlock *tb,
975 target_ulong phys_pc, target_ulong phys_page2)
976 {
977 unsigned int h;
978 TranslationBlock **ptb;
979
980 /* add in the physical hash table */
981 h = tb_phys_hash_func(phys_pc);
982 ptb = &tb_phys_hash[h];
983 tb->phys_hash_next = *ptb;
984 *ptb = tb;
985
986 /* add in the page list */
987 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
988 if (phys_page2 != -1)
989 tb_alloc_page(tb, 1, phys_page2);
990 else
991 tb->page_addr[1] = -1;
992
993 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
994 tb->jmp_next[0] = NULL;
995 tb->jmp_next[1] = NULL;
996
997 /* init original jump addresses */
998 if (tb->tb_next_offset[0] != 0xffff)
999 tb_reset_jump(tb, 0);
1000 if (tb->tb_next_offset[1] != 0xffff)
1001 tb_reset_jump(tb, 1);
1002
1003 #ifdef DEBUG_TB_CHECK
1004 tb_page_check();
1005 #endif
1006 }
1007
1008 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1009 tb[1].tc_ptr. Return NULL if not found */
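/* Note: the binary search relies on tbs[] being ordered by tc_ptr, which
   holds because code_gen_ptr only grows between flushes. */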
1010 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1011 {
1012 int m_min, m_max, m;
1013 unsigned long v;
1014 TranslationBlock *tb;
1015
1016 if (nb_tbs <= 0)
1017 return NULL;
1018 if (tc_ptr < (unsigned long)code_gen_buffer ||
1019 tc_ptr >= (unsigned long)code_gen_ptr)
1020 return NULL;
1021 /* binary search (cf Knuth) */
1022 m_min = 0;
1023 m_max = nb_tbs - 1;
1024 while (m_min <= m_max) {
1025 m = (m_min + m_max) >> 1;
1026 tb = &tbs[m];
1027 v = (unsigned long)tb->tc_ptr;
1028 if (v == tc_ptr)
1029 return tb;
1030 else if (tc_ptr < v) {
1031 m_max = m - 1;
1032 } else {
1033 m_min = m + 1;
1034 }
1035 }
1036 return &tbs[m_max];
1037 }
1038
1039 static void tb_reset_jump_recursive(TranslationBlock *tb);
1040
1041 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1042 {
1043 TranslationBlock *tb1, *tb_next, **ptb;
1044 unsigned int n1;
1045
1046 tb1 = tb->jmp_next[n];
1047 if (tb1 != NULL) {
1048 /* find head of list */
1049 for(;;) {
1050 n1 = (long)tb1 & 3;
1051 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1052 if (n1 == 2)
1053 break;
1054 tb1 = tb1->jmp_next[n1];
1055 }
1056 /* we are now sure that tb jumps to tb1 */
1057 tb_next = tb1;
1058
1059 /* remove tb from the jmp_first list */
1060 ptb = &tb_next->jmp_first;
1061 for(;;) {
1062 tb1 = *ptb;
1063 n1 = (long)tb1 & 3;
1064 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1065 if (n1 == n && tb1 == tb)
1066 break;
1067 ptb = &tb1->jmp_next[n1];
1068 }
1069 *ptb = tb->jmp_next[n];
1070 tb->jmp_next[n] = NULL;
1071
1072 /* suppress the jump to next tb in generated code */
1073 tb_reset_jump(tb, n);
1074
1075 /* suppress jumps in the tb on which we could have jumped */
1076 tb_reset_jump_recursive(tb_next);
1077 }
1078 }
1079
1080 static void tb_reset_jump_recursive(TranslationBlock *tb)
1081 {
1082 tb_reset_jump_recursive2(tb, 0);
1083 tb_reset_jump_recursive2(tb, 1);
1084 }
1085
1086 #if defined(TARGET_HAS_ICE)
1087 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1088 {
1089 target_phys_addr_t addr;
1090 target_ulong pd;
1091 ram_addr_t ram_addr;
1092 PhysPageDesc *p;
1093
1094 addr = cpu_get_phys_page_debug(env, pc);
1095 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1096 if (!p) {
1097 pd = IO_MEM_UNASSIGNED;
1098 } else {
1099 pd = p->phys_offset;
1100 }
1101 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1102 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1103 }
1104 #endif
1105
1106 /* Add a watchpoint. */
1107 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1108 {
1109 int i;
1110
1111 for (i = 0; i < env->nb_watchpoints; i++) {
1112 if (addr == env->watchpoint[i].vaddr)
1113 return 0;
1114 }
1115 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1116 return -1;
1117
1118 i = env->nb_watchpoints++;
1119 env->watchpoint[i].vaddr = addr;
1120 tlb_flush_page(env, addr);
1121 /* FIXME: This flush is needed because of the hack to make memory ops
1122 terminate the TB. It can be removed once the proper IO trap and
1123 re-execute bits are in. */
1124 tb_flush(env);
1125 return i;
1126 }
1127
1128 /* Remove a watchpoint. */
1129 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1130 {
1131 int i;
1132
1133 for (i = 0; i < env->nb_watchpoints; i++) {
1134 if (addr == env->watchpoint[i].vaddr) {
1135 env->nb_watchpoints--;
1136 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1137 tlb_flush_page(env, addr);
1138 return 0;
1139 }
1140 }
1141 return -1;
1142 }
1143
1144 /* Remove all watchpoints. */
1145 void cpu_watchpoint_remove_all(CPUState *env) {
1146 int i;
1147
1148 for (i = 0; i < env->nb_watchpoints; i++) {
1149 tlb_flush_page(env, env->watchpoint[i].vaddr);
1150 }
1151 env->nb_watchpoints = 0;
1152 }
1153
1154 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1155 breakpoint is reached */
1156 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1157 {
1158 #if defined(TARGET_HAS_ICE)
1159 int i;
1160
1161 for(i = 0; i < env->nb_breakpoints; i++) {
1162 if (env->breakpoints[i] == pc)
1163 return 0;
1164 }
1165
1166 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1167 return -1;
1168 env->breakpoints[env->nb_breakpoints++] = pc;
1169
1170 breakpoint_invalidate(env, pc);
1171 return 0;
1172 #else
1173 return -1;
1174 #endif
1175 }
1176
1177 /* remove all breakpoints */
1178 void cpu_breakpoint_remove_all(CPUState *env) {
1179 #if defined(TARGET_HAS_ICE)
1180 int i;
1181 for(i = 0; i < env->nb_breakpoints; i++) {
1182 breakpoint_invalidate(env, env->breakpoints[i]);
1183 }
1184 env->nb_breakpoints = 0;
1185 #endif
1186 }
1187
1188 /* remove a breakpoint */
1189 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1190 {
1191 #if defined(TARGET_HAS_ICE)
1192 int i;
1193 for(i = 0; i < env->nb_breakpoints; i++) {
1194 if (env->breakpoints[i] == pc)
1195 goto found;
1196 }
1197 return -1;
1198 found:
1199 env->nb_breakpoints--;
1200 if (i < env->nb_breakpoints)
1201 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1202
1203 breakpoint_invalidate(env, pc);
1204 return 0;
1205 #else
1206 return -1;
1207 #endif
1208 }
1209
1210 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1211 CPU loop after each instruction */
1212 void cpu_single_step(CPUState *env, int enabled)
1213 {
1214 #if defined(TARGET_HAS_ICE)
1215 if (env->singlestep_enabled != enabled) {
1216 env->singlestep_enabled = enabled;
1217 /* must flush all the translated code to avoid inconsistencies */
1218 /* XXX: only flush what is necessary */
1219 tb_flush(env);
1220 }
1221 #endif
1222 }
1223
1224 /* enable or disable low level logging */
1225 void cpu_set_log(int log_flags)
1226 {
1227 loglevel = log_flags;
1228 if (loglevel && !logfile) {
1229 logfile = fopen(logfilename, log_append ? "a" : "w");
1230 if (!logfile) {
1231 perror(logfilename);
1232 _exit(1);
1233 }
1234 #if !defined(CONFIG_SOFTMMU)
1235 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1236 {
1237 static uint8_t logfile_buf[4096];
1238 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1239 }
1240 #else
1241 setvbuf(logfile, NULL, _IOLBF, 0);
1242 #endif
1243 log_append = 1;
1244 }
1245 if (!loglevel && logfile) {
1246 fclose(logfile);
1247 logfile = NULL;
1248 }
1249 }
1250
1251 void cpu_set_log_filename(const char *filename)
1252 {
1253 logfilename = strdup(filename);
1254 if (logfile) {
1255 fclose(logfile);
1256 logfile = NULL;
1257 }
1258 cpu_set_log(loglevel);
1259 }
1260
1261 /* mask must never be zero, except for A20 change call */
1262 void cpu_interrupt(CPUState *env, int mask)
1263 {
1264 TranslationBlock *tb;
1265 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1266
1267 env->interrupt_request |= mask;
1268 /* if the cpu is currently executing code, we must unlink it and
1269 all the potentially executing TBs */
1270 tb = env->current_tb;
1271 if (tb && !testandset(&interrupt_lock)) {
1272 env->current_tb = NULL;
1273 tb_reset_jump_recursive(tb);
1274 resetlock(&interrupt_lock);
1275 }
1276 }
1277
1278 void cpu_reset_interrupt(CPUState *env, int mask)
1279 {
1280 env->interrupt_request &= ~mask;
1281 }
1282
1283 CPULogItem cpu_log_items[] = {
1284 { CPU_LOG_TB_OUT_ASM, "out_asm",
1285 "show generated host assembly code for each compiled TB" },
1286 { CPU_LOG_TB_IN_ASM, "in_asm",
1287 "show target assembly code for each compiled TB" },
1288 { CPU_LOG_TB_OP, "op",
1289 "show micro ops for each compiled TB" },
1290 { CPU_LOG_TB_OP_OPT, "op_opt",
1291 "show micro ops "
1292 #ifdef TARGET_I386
1293 "before eflags optimization and "
1294 #endif
1295 "after liveness analysis" },
1296 { CPU_LOG_INT, "int",
1297 "show interrupts/exceptions in short format" },
1298 { CPU_LOG_EXEC, "exec",
1299 "show trace before each executed TB (lots of logs)" },
1300 { CPU_LOG_TB_CPU, "cpu",
1301 "show CPU state before block translation" },
1302 #ifdef TARGET_I386
1303 { CPU_LOG_PCALL, "pcall",
1304 "show protected mode far calls/returns/exceptions" },
1305 #endif
1306 #ifdef DEBUG_IOPORT
1307 { CPU_LOG_IOPORT, "ioport",
1308 "show all i/o ports accesses" },
1309 #endif
1310 { 0, NULL, NULL },
1311 };
1312
1313 static int cmp1(const char *s1, int n, const char *s2)
1314 {
1315 if (strlen(s2) != n)
1316 return 0;
1317 return memcmp(s1, s2, n) == 0;
1318 }
1319
1320 /* takes a comma-separated list of log masks. Returns 0 on error. */
1321 int cpu_str_to_log_mask(const char *str)
1322 {
1323 CPULogItem *item;
1324 int mask;
1325 const char *p, *p1;
1326
1327 p = str;
1328 mask = 0;
1329 for(;;) {
1330 p1 = strchr(p, ',');
1331 if (!p1)
1332 p1 = p + strlen(p);
1333 if(cmp1(p,p1-p,"all")) {
1334 for(item = cpu_log_items; item->mask != 0; item++) {
1335 mask |= item->mask;
1336 }
1337 } else {
1338 for(item = cpu_log_items; item->mask != 0; item++) {
1339 if (cmp1(p, p1 - p, item->name))
1340 goto found;
1341 }
1342 return 0;
1343 }
1344 found:
1345 mask |= item->mask;
1346 if (*p1 != ',')
1347 break;
1348 p = p1 + 1;
1349 }
1350 return mask;
1351 }
1352
1353 void cpu_abort(CPUState *env, const char *fmt, ...)
1354 {
1355 va_list ap;
1356 va_list ap2;
1357
1358 va_start(ap, fmt);
1359 va_copy(ap2, ap);
1360 fprintf(stderr, "qemu: fatal: ");
1361 vfprintf(stderr, fmt, ap);
1362 fprintf(stderr, "\n");
1363 #ifdef TARGET_I386
1364 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1365 #else
1366 cpu_dump_state(env, stderr, fprintf, 0);
1367 #endif
1368 if (logfile) {
1369 fprintf(logfile, "qemu: fatal: ");
1370 vfprintf(logfile, fmt, ap2);
1371 fprintf(logfile, "\n");
1372 #ifdef TARGET_I386
1373 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1374 #else
1375 cpu_dump_state(env, logfile, fprintf, 0);
1376 #endif
1377 fflush(logfile);
1378 fclose(logfile);
1379 }
1380 va_end(ap2);
1381 va_end(ap);
1382 abort();
1383 }
1384
1385 CPUState *cpu_copy(CPUState *env)
1386 {
1387 CPUState *new_env = cpu_init(env->cpu_model_str);
1388 /* preserve chaining and index */
1389 CPUState *next_cpu = new_env->next_cpu;
1390 int cpu_index = new_env->cpu_index;
1391 memcpy(new_env, env, sizeof(CPUState));
1392 new_env->next_cpu = next_cpu;
1393 new_env->cpu_index = cpu_index;
1394 return new_env;
1395 }
1396
1397 #if !defined(CONFIG_USER_ONLY)
1398
1399 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1400 {
1401 unsigned int i;
1402
1403 /* Discard jump cache entries for any tb which might potentially
1404 overlap the flushed page. */
1405 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1406 memset (&env->tb_jmp_cache[i], 0,
1407 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1408
1409 i = tb_jmp_cache_hash_page(addr);
1410 memset (&env->tb_jmp_cache[i], 0,
1411 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1412 }
1413
1414 /* NOTE: if flush_global is true, also flush global entries (not
1415 implemented yet) */
1416 void tlb_flush(CPUState *env, int flush_global)
1417 {
1418 int i;
1419
1420 #if defined(DEBUG_TLB)
1421 printf("tlb_flush:\n");
1422 #endif
1423 /* must reset current TB so that interrupts cannot modify the
1424 links while we are modifying them */
1425 env->current_tb = NULL;
1426
1427 for(i = 0; i < CPU_TLB_SIZE; i++) {
1428 env->tlb_table[0][i].addr_read = -1;
1429 env->tlb_table[0][i].addr_write = -1;
1430 env->tlb_table[0][i].addr_code = -1;
1431 env->tlb_table[1][i].addr_read = -1;
1432 env->tlb_table[1][i].addr_write = -1;
1433 env->tlb_table[1][i].addr_code = -1;
1434 #if (NB_MMU_MODES >= 3)
1435 env->tlb_table[2][i].addr_read = -1;
1436 env->tlb_table[2][i].addr_write = -1;
1437 env->tlb_table[2][i].addr_code = -1;
1438 #if (NB_MMU_MODES == 4)
1439 env->tlb_table[3][i].addr_read = -1;
1440 env->tlb_table[3][i].addr_write = -1;
1441 env->tlb_table[3][i].addr_code = -1;
1442 #endif
1443 #endif
1444 }
1445
1446 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1447
1448 #if !defined(CONFIG_SOFTMMU)
1449 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1450 #endif
1451 #ifdef USE_KQEMU
1452 if (env->kqemu_enabled) {
1453 kqemu_flush(env, flush_global);
1454 }
1455 #endif
1456 tlb_flush_count++;
1457 }
1458
1459 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1460 {
1461 if (addr == (tlb_entry->addr_read &
1462 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1463 addr == (tlb_entry->addr_write &
1464 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1465 addr == (tlb_entry->addr_code &
1466 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1467 tlb_entry->addr_read = -1;
1468 tlb_entry->addr_write = -1;
1469 tlb_entry->addr_code = -1;
1470 }
1471 }
1472
1473 void tlb_flush_page(CPUState *env, target_ulong addr)
1474 {
1475 int i;
1476
1477 #if defined(DEBUG_TLB)
1478 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1479 #endif
1480 /* must reset current TB so that interrupts cannot modify the
1481 links while we are modifying them */
1482 env->current_tb = NULL;
1483
1484 addr &= TARGET_PAGE_MASK;
1485 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1486 tlb_flush_entry(&env->tlb_table[0][i], addr);
1487 tlb_flush_entry(&env->tlb_table[1][i], addr);
1488 #if (NB_MMU_MODES >= 3)
1489 tlb_flush_entry(&env->tlb_table[2][i], addr);
1490 #if (NB_MMU_MODES == 4)
1491 tlb_flush_entry(&env->tlb_table[3][i], addr);
1492 #endif
1493 #endif
1494
1495 tlb_flush_jmp_cache(env, addr);
1496
1497 #if !defined(CONFIG_SOFTMMU)
1498 if (addr < MMAP_AREA_END)
1499 munmap((void *)addr, TARGET_PAGE_SIZE);
1500 #endif
1501 #ifdef USE_KQEMU
1502 if (env->kqemu_enabled) {
1503 kqemu_flush_page(env, addr);
1504 }
1505 #endif
1506 }
1507
1508 /* update the TLBs so that writes to code in the virtual page 'addr'
1509 can be detected */
1510 static void tlb_protect_code(ram_addr_t ram_addr)
1511 {
1512 cpu_physical_memory_reset_dirty(ram_addr,
1513 ram_addr + TARGET_PAGE_SIZE,
1514 CODE_DIRTY_FLAG);
1515 }
1516
1517 /* update the TLB so that writes in physical page 'ram_addr' are no
1518 longer tested for self-modifying code */
1519 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1520 target_ulong vaddr)
1521 {
1522 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1523 }
1524
1525 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1526 unsigned long start, unsigned long length)
1527 {
1528 unsigned long addr;
1529 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1530 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1531 if ((addr - start) < length) {
1532 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1533 }
1534 }
1535 }
1536
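/* clear the given dirty flag bits for the RAM range [start, end) and
   redirect every TLB write entry that maps into the range to
   IO_MEM_NOTDIRTY, so that the next write takes the slow path and marks
   the page dirty again. */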
1537 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1538 int dirty_flags)
1539 {
1540 CPUState *env;
1541 unsigned long length, start1;
1542 int i, mask, len;
1543 uint8_t *p;
1544
1545 start &= TARGET_PAGE_MASK;
1546 end = TARGET_PAGE_ALIGN(end);
1547
1548 length = end - start;
1549 if (length == 0)
1550 return;
1551 len = length >> TARGET_PAGE_BITS;
1552 #ifdef USE_KQEMU
1553 /* XXX: should not depend on cpu context */
1554 env = first_cpu;
1555 if (env->kqemu_enabled) {
1556 ram_addr_t addr;
1557 addr = start;
1558 for(i = 0; i < len; i++) {
1559 kqemu_set_notdirty(env, addr);
1560 addr += TARGET_PAGE_SIZE;
1561 }
1562 }
1563 #endif
1564 mask = ~dirty_flags;
1565 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1566 for(i = 0; i < len; i++)
1567 p[i] &= mask;
1568
1569 /* we modify the TLB cache so that the dirty bit will be set again
1570 when accessing the range */
1571 start1 = start + (unsigned long)phys_ram_base;
1572 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1573 for(i = 0; i < CPU_TLB_SIZE; i++)
1574 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1575 for(i = 0; i < CPU_TLB_SIZE; i++)
1576 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1577 #if (NB_MMU_MODES >= 3)
1578 for(i = 0; i < CPU_TLB_SIZE; i++)
1579 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1580 #if (NB_MMU_MODES == 4)
1581 for(i = 0; i < CPU_TLB_SIZE; i++)
1582 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1583 #endif
1584 #endif
1585 }
1586
1587 #if !defined(CONFIG_SOFTMMU)
1588 /* XXX: this is expensive */
1589 {
1590 VirtPageDesc *p;
1591 int j;
1592 target_ulong addr;
1593
1594 for(i = 0; i < L1_SIZE; i++) {
1595 p = l1_virt_map[i];
1596 if (p) {
1597 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1598 for(j = 0; j < L2_SIZE; j++) {
1599 if (p->valid_tag == virt_valid_tag &&
1600 p->phys_addr >= start && p->phys_addr < end &&
1601 (p->prot & PROT_WRITE)) {
1602 if (addr < MMAP_AREA_END) {
1603 mprotect((void *)addr, TARGET_PAGE_SIZE,
1604 p->prot & ~PROT_WRITE);
1605 }
1606 }
1607 addr += TARGET_PAGE_SIZE;
1608 p++;
1609 }
1610 }
1611 }
1612 }
1613 #endif
1614 }
1615
1616 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1617 {
1618 ram_addr_t ram_addr;
1619
1620 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1621 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1622 tlb_entry->addend - (unsigned long)phys_ram_base;
1623 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1624 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1625 }
1626 }
1627 }
1628
1629 /* update the TLB according to the current state of the dirty bits */
1630 void cpu_tlb_update_dirty(CPUState *env)
1631 {
1632 int i;
1633 for(i = 0; i < CPU_TLB_SIZE; i++)
1634 tlb_update_dirty(&env->tlb_table[0][i]);
1635 for(i = 0; i < CPU_TLB_SIZE; i++)
1636 tlb_update_dirty(&env->tlb_table[1][i]);
1637 #if (NB_MMU_MODES >= 3)
1638 for(i = 0; i < CPU_TLB_SIZE; i++)
1639 tlb_update_dirty(&env->tlb_table[2][i]);
1640 #if (NB_MMU_MODES == 4)
1641 for(i = 0; i < CPU_TLB_SIZE; i++)
1642 tlb_update_dirty(&env->tlb_table[3][i]);
1643 #endif
1644 #endif
1645 }
1646
1647 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1648 unsigned long start)
1649 {
1650 unsigned long addr;
1651 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1652 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1653 if (addr == start) {
1654 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1655 }
1656 }
1657 }
1658
1659 /* update the TLB corresponding to virtual page vaddr and phys addr
1660 addr so that it is no longer dirty */
1661 static inline void tlb_set_dirty(CPUState *env,
1662 unsigned long addr, target_ulong vaddr)
1663 {
1664 int i;
1665
1666 addr &= TARGET_PAGE_MASK;
1667 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1668 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1669 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1670 #if (NB_MMU_MODES >= 3)
1671 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1672 #if (NB_MMU_MODES == 4)
1673 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1674 #endif
1675 #endif
1676 }
1677
1678 /* add a new TLB entry. At most one entry for a given virtual address
1679 is permitted. Return 0 if OK or 2 if the page could not be mapped
1680 (can only happen in non SOFTMMU mode for I/O pages or pages
1681 conflicting with the host address space). */
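/* In a TLB entry, the low bits of addr_read/addr_write/addr_code hold
   the I/O handler index or flags such as IO_MEM_NOTDIRTY, while adding
   'addend' to the guest virtual address yields the host address for RAM
   pages. */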
1682 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1683 target_phys_addr_t paddr, int prot,
1684 int mmu_idx, int is_softmmu)
1685 {
1686 PhysPageDesc *p;
1687 unsigned long pd;
1688 unsigned int index;
1689 target_ulong address;
1690 target_phys_addr_t addend;
1691 int ret;
1692 CPUTLBEntry *te;
1693 int i;
1694
1695 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1696 if (!p) {
1697 pd = IO_MEM_UNASSIGNED;
1698 } else {
1699 pd = p->phys_offset;
1700 }
1701 #if defined(DEBUG_TLB)
1702 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1703 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1704 #endif
1705
1706 ret = 0;
1707 #if !defined(CONFIG_SOFTMMU)
1708 if (is_softmmu)
1709 #endif
1710 {
1711 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1712 /* IO memory case */
1713 address = vaddr | pd;
1714 addend = paddr;
1715 } else {
1716 /* standard memory */
1717 address = vaddr;
1718 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1719 }
1720
1721 /* Make accesses to pages with watchpoints go via the
1722 watchpoint trap routines. */
1723 for (i = 0; i < env->nb_watchpoints; i++) {
1724 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1725 if (address & ~TARGET_PAGE_MASK) {
1726 env->watchpoint[i].addend = 0;
1727 address = vaddr | io_mem_watch;
1728 } else {
1729 env->watchpoint[i].addend = pd - paddr +
1730 (unsigned long) phys_ram_base;
1731 /* TODO: Figure out how to make read watchpoints coexist
1732 with code. */
1733 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1734 }
1735 }
1736 }
1737
1738 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1739 addend -= vaddr;
1740 te = &env->tlb_table[mmu_idx][index];
1741 te->addend = addend;
1742 if (prot & PAGE_READ) {
1743 te->addr_read = address;
1744 } else {
1745 te->addr_read = -1;
1746 }
1747
1748 if (prot & PAGE_EXEC) {
1749 te->addr_code = address;
1750 } else {
1751 te->addr_code = -1;
1752 }
1753 if (prot & PAGE_WRITE) {
1754 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1755 (pd & IO_MEM_ROMD)) {
1756 /* write access calls the I/O callback */
1757 te->addr_write = vaddr |
1758 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1759 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1760 !cpu_physical_memory_is_dirty(pd)) {
1761 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1762 } else {
1763 te->addr_write = address;
1764 }
1765 } else {
1766 te->addr_write = -1;
1767 }
1768 }
1769 #if !defined(CONFIG_SOFTMMU)
1770 else {
1771 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1772 /* IO access: no mapping is done as it will be handled by the
1773 soft MMU */
1774 if (!(env->hflags & HF_SOFTMMU_MASK))
1775 ret = 2;
1776 } else {
1777 void *map_addr;
1778
1779 if (vaddr >= MMAP_AREA_END) {
1780 ret = 2;
1781 } else {
1782 if (prot & PROT_WRITE) {
1783 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1784 #if defined(TARGET_HAS_SMC) || 1
1785 first_tb ||
1786 #endif
1787 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1788 !cpu_physical_memory_is_dirty(pd))) {
1789 /* ROM: we behave as if code were inside */
1790 /* if code is present, we only map as read only and save the
1791 original mapping */
1792 VirtPageDesc *vp;
1793
1794 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1795 vp->phys_addr = pd;
1796 vp->prot = prot;
1797 vp->valid_tag = virt_valid_tag;
1798 prot &= ~PAGE_WRITE;
1799 }
1800 }
1801 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1802 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1803 if (map_addr == MAP_FAILED) {
1804 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1805 paddr, vaddr);
1806 }
1807 }
1808 }
1809 }
1810 #endif
1811 return ret;
1812 }
1813
1814 /* called from signal handler: invalidate the code and unprotect the
1815 page. Return TRUE if the fault was successfully handled. */
1816 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1817 {
1818 #if !defined(CONFIG_SOFTMMU)
1819 VirtPageDesc *vp;
1820
1821 #if defined(DEBUG_TLB)
1822 printf("page_unprotect: addr=0x%08x\n", addr);
1823 #endif
1824 addr &= TARGET_PAGE_MASK;
1825
1826 /* if it is not mapped, no need to worry here */
1827 if (addr >= MMAP_AREA_END)
1828 return 0;
1829 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1830 if (!vp)
1831 return 0;
1832 /* NOTE: in this case, validate_tag is _not_ tested as it
1833 validates only the code TLB */
1834 if (vp->valid_tag != virt_valid_tag)
1835 return 0;
1836 if (!(vp->prot & PAGE_WRITE))
1837 return 0;
1838 #if defined(DEBUG_TLB)
1839 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1840 addr, vp->phys_addr, vp->prot);
1841 #endif
1842 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1843 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1844 (unsigned long)addr, vp->prot);
1845 /* set the dirty bit */
1846 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1847 /* flush the code inside */
1848 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1849 return 1;
1850 #else
1851 return 0;
1852 #endif
1853 }
1854
1855 #else
1856
1857 void tlb_flush(CPUState *env, int flush_global)
1858 {
1859 }
1860
1861 void tlb_flush_page(CPUState *env, target_ulong addr)
1862 {
1863 }
1864
1865 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1866 target_phys_addr_t paddr, int prot,
1867 int mmu_idx, int is_softmmu)
1868 {
1869 return 0;
1870 }
1871
1872 /* dump memory mappings */
1873 void page_dump(FILE *f)
1874 {
1875 unsigned long start, end;
1876 int i, j, prot, prot1;
1877 PageDesc *p;
1878
1879 fprintf(f, "%-8s %-8s %-8s %s\n",
1880 "start", "end", "size", "prot");
1881 start = -1;
1882 end = -1;
1883 prot = 0;
1884 for(i = 0; i <= L1_SIZE; i++) {
1885 if (i < L1_SIZE)
1886 p = l1_map[i];
1887 else
1888 p = NULL;
1889 for(j = 0;j < L2_SIZE; j++) {
1890 if (!p)
1891 prot1 = 0;
1892 else
1893 prot1 = p[j].flags;
1894 if (prot1 != prot) {
1895 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1896 if (start != -1) {
1897 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1898 start, end, end - start,
1899 prot & PAGE_READ ? 'r' : '-',
1900 prot & PAGE_WRITE ? 'w' : '-',
1901 prot & PAGE_EXEC ? 'x' : '-');
1902 }
1903 if (prot1 != 0)
1904 start = end;
1905 else
1906 start = -1;
1907 prot = prot1;
1908 }
1909 if (!p)
1910 break;
1911 }
1912 }
1913 }
1914
1915 int page_get_flags(target_ulong address)
1916 {
1917 PageDesc *p;
1918
1919 p = page_find(address >> TARGET_PAGE_BITS);
1920 if (!p)
1921 return 0;
1922 return p->flags;
1923 }
1924
1925 /* modify the flags of a page and invalidate the code if
1926 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1927 depending on PAGE_WRITE */
1928 void page_set_flags(target_ulong start, target_ulong end, int flags)
1929 {
1930 PageDesc *p;
1931 target_ulong addr;
1932
1933 start = start & TARGET_PAGE_MASK;
1934 end = TARGET_PAGE_ALIGN(end);
1935 if (flags & PAGE_WRITE)
1936 flags |= PAGE_WRITE_ORG;
1937 spin_lock(&tb_lock);
1938 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1939 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1940 /* if the page was write-protected because it contains code and is
1941 being made writable, we invalidate the code inside */
1942 if (!(p->flags & PAGE_WRITE) &&
1943 (flags & PAGE_WRITE) &&
1944 p->first_tb) {
1945 tb_invalidate_phys_page(addr, 0, NULL);
1946 }
1947 p->flags = flags;
1948 }
1949 spin_unlock(&tb_lock);
1950 }
1951
1952 int page_check_range(target_ulong start, target_ulong len, int flags)
1953 {
1954 PageDesc *p;
1955 target_ulong end;
1956 target_ulong addr;
1957
1958 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1959 start = start & TARGET_PAGE_MASK;
1960
1961 if( end < start )
1962 /* we've wrapped around */
1963 return -1;
1964 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1965 p = page_find(addr >> TARGET_PAGE_BITS);
1966 if( !p )
1967 return -1;
1968 if( !(p->flags & PAGE_VALID) )
1969 return -1;
1970
1971 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1972 return -1;
1973 if (flags & PAGE_WRITE) {
1974 if (!(p->flags & PAGE_WRITE_ORG))
1975 return -1;
1976 /* unprotect the page if it was put read-only because it
1977 contains translated code */
1978 if (!(p->flags & PAGE_WRITE)) {
1979 if (!page_unprotect(addr, 0, NULL))
1980 return -1;
1981 }
1982 return 0;
1983 }
1984 }
1985 return 0;
1986 }
1987
1988 /* called from signal handler: invalidate the code and unprotect the
1989 page. Return TRUE if the fault was successfully handled. */
1990 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1991 {
1992 unsigned int page_index, prot, pindex;
1993 PageDesc *p, *p1;
1994 target_ulong host_start, host_end, addr;
1995
1996 host_start = address & qemu_host_page_mask;
1997 page_index = host_start >> TARGET_PAGE_BITS;
1998 p1 = page_find(page_index);
1999 if (!p1)
2000 return 0;
2001 host_end = host_start + qemu_host_page_size;
2002 p = p1;
2003 prot = 0;
2004 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2005 prot |= p->flags;
2006 p++;
2007 }
2008 /* if the page was really writable, then we change its
2009 protection back to writable */
2010 if (prot & PAGE_WRITE_ORG) {
2011 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2012 if (!(p1[pindex].flags & PAGE_WRITE)) {
2013 mprotect((void *)g2h(host_start), qemu_host_page_size,
2014 (prot & PAGE_BITS) | PAGE_WRITE);
2015 p1[pindex].flags |= PAGE_WRITE;
2016 /* and since the content will be modified, we must invalidate
2017 the corresponding translated code. */
2018 tb_invalidate_phys_page(address, pc, puc);
2019 #ifdef DEBUG_TB_CHECK
2020 tb_invalidate_check(address);
2021 #endif
2022 return 1;
2023 }
2024 }
2025 return 0;
2026 }
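
/* Note: page_unprotect() works at host page granularity because mprotect()
   can only change protection for whole host pages; when the host page is
   larger than the target page, all target pages sharing it must be
   examined before write access is restored. */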
2027
2028 static inline void tlb_set_dirty(CPUState *env,
2029 unsigned long addr, target_ulong vaddr)
2030 {
2031 }
2032 #endif /* defined(CONFIG_USER_ONLY) */
2033
2034 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2035 ram_addr_t memory);
2036 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2037 ram_addr_t orig_memory);
2038 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2039 need_subpage) \
2040 do { \
2041 if (addr > start_addr) \
2042 start_addr2 = 0; \
2043 else { \
2044 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2045 if (start_addr2 > 0) \
2046 need_subpage = 1; \
2047 } \
2048 \
2049 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2050 end_addr2 = TARGET_PAGE_SIZE - 1; \
2051 else { \
2052 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2053 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2054 need_subpage = 1; \
2055 } \
2056 } while (0)
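
/* CHECK_SUBPAGE computes, for the target page containing 'addr', the first
   and last byte offsets (start_addr2/end_addr2) actually covered by the
   region being registered, and sets need_subpage when the region does not
   span the whole page. */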
2057
2058 /* register physical memory. 'size' must be a multiple of the target
2059 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, the region is
2060 an I/O memory page */
2061 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2062 ram_addr_t size,
2063 ram_addr_t phys_offset)
2064 {
2065 target_phys_addr_t addr, end_addr;
2066 PhysPageDesc *p;
2067 CPUState *env;
2068 ram_addr_t orig_size = size;
2069 void *subpage;
2070
2071 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2072 end_addr = start_addr + (target_phys_addr_t)size;
2073 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2074 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2075 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2076 ram_addr_t orig_memory = p->phys_offset;
2077 target_phys_addr_t start_addr2, end_addr2;
2078 int need_subpage = 0;
2079
2080 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2081 need_subpage);
2082 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2083 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2084 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2085 &p->phys_offset, orig_memory);
2086 } else {
2087 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2088 >> IO_MEM_SHIFT];
2089 }
2090 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2091 } else {
2092 p->phys_offset = phys_offset;
2093 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2094 (phys_offset & IO_MEM_ROMD))
2095 phys_offset += TARGET_PAGE_SIZE;
2096 }
2097 } else {
2098 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2099 p->phys_offset = phys_offset;
2100 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2101 (phys_offset & IO_MEM_ROMD))
2102 phys_offset += TARGET_PAGE_SIZE;
2103 else {
2104 target_phys_addr_t start_addr2, end_addr2;
2105 int need_subpage = 0;
2106
2107 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2108 end_addr2, need_subpage);
2109
2110 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2111 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2112 &p->phys_offset, IO_MEM_UNASSIGNED);
2113 subpage_register(subpage, start_addr2, end_addr2,
2114 phys_offset);
2115 }
2116 }
2117 }
2118 }
2119
2120 /* since each CPU stores ram addresses in its TLB cache, we must
2121 reset the modified entries */
2122 /* XXX: slow ! */
2123 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2124 tlb_flush(env, 1);
2125 }
2126 }
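
/* Illustrative usage sketch (addresses and handler names are hypothetical):
   a board model typically allocates RAM backing with qemu_ram_alloc() and
   maps it, and maps MMIO using the index returned by
   cpu_register_io_memory(), e.g.:

       ram_offset = qemu_ram_alloc(0x2000);
       cpu_register_physical_memory(0x10000000, 0x2000, ram_offset | IO_MEM_RAM);
       io = cpu_register_io_memory(0, my_read_fns, my_write_fns, opaque);
       cpu_register_physical_memory(0x20000000, 0x1000, io);
*/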
2127
2128 /* XXX: temporary until new memory mapping API */
2129 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2130 {
2131 PhysPageDesc *p;
2132
2133 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2134 if (!p)
2135 return IO_MEM_UNASSIGNED;
2136 return p->phys_offset;
2137 }
2138
2139 /* XXX: better than nothing */
2140 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2141 {
2142 ram_addr_t addr;
2143 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2144 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2145 (uint64_t)size, (uint64_t)phys_ram_size);
2146 abort();
2147 }
2148 addr = phys_ram_alloc_offset;
2149 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2150 return addr;
2151 }
2152
2153 void qemu_ram_free(ram_addr_t addr)
2154 {
2155 }
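
/* Note: qemu_ram_alloc() is a simple bump allocator over the preallocated
   phys_ram_base region; qemu_ram_free() is currently a no-op, so guest RAM
   is never actually reclaimed. */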
2156
2157 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2158 {
2159 #ifdef DEBUG_UNASSIGNED
2160 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2161 #endif
2162 #ifdef TARGET_SPARC
2163 do_unassigned_access(addr, 0, 0, 0);
2164 #elif defined(TARGET_CRIS)
2165 do_unassigned_access(addr, 0, 0, 0);
2166 #endif
2167 return 0;
2168 }
2169
2170 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2171 {
2172 #ifdef DEBUG_UNASSIGNED
2173 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2174 #endif
2175 #ifdef TARGET_SPARC
2176 do_unassigned_access(addr, 1, 0, 0);
2177 #elif defined(TARGET_CRIS)
2178 do_unassigned_access(addr, 1, 0, 0);
2179 #endif
2180 }
2181
2182 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2183 unassigned_mem_readb,
2184 unassigned_mem_readb,
2185 unassigned_mem_readb,
2186 };
2187
2188 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2189 unassigned_mem_writeb,
2190 unassigned_mem_writeb,
2191 unassigned_mem_writeb,
2192 };
2193
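/* The "not dirty" handlers below implement the slow write path for RAM
   pages that still contain translated code: each write invalidates the
   affected TBs, performs the store, updates the dirty bitmap, and removes
   the notdirty callback once all translated code in the page has been
   flushed. */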
2194 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2195 {
2196 unsigned long ram_addr;
2197 int dirty_flags;
2198 ram_addr = addr - (unsigned long)phys_ram_base;
2199 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2200 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2201 #if !defined(CONFIG_USER_ONLY)
2202 tb_invalidate_phys_page_fast(ram_addr, 1);
2203 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2204 #endif
2205 }
2206 stb_p((uint8_t *)(long)addr, val);
2207 #ifdef USE_KQEMU
2208 if (cpu_single_env->kqemu_enabled &&
2209 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2210 kqemu_modify_page(cpu_single_env, ram_addr);
2211 #endif
2212 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2213 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2214 /* we remove the notdirty callback only if the code has been
2215 flushed */
2216 if (dirty_flags == 0xff)
2217 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2218 }
2219
2220 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2221 {
2222 unsigned long ram_addr;
2223 int dirty_flags;
2224 ram_addr = addr - (unsigned long)phys_ram_base;
2225 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2226 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2227 #if !defined(CONFIG_USER_ONLY)
2228 tb_invalidate_phys_page_fast(ram_addr, 2);
2229 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2230 #endif
2231 }
2232 stw_p((uint8_t *)(long)addr, val);
2233 #ifdef USE_KQEMU
2234 if (cpu_single_env->kqemu_enabled &&
2235 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2236 kqemu_modify_page(cpu_single_env, ram_addr);
2237 #endif
2238 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2239 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2240 /* we remove the notdirty callback only if the code has been
2241 flushed */
2242 if (dirty_flags == 0xff)
2243 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2244 }
2245
2246 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2247 {
2248 unsigned long ram_addr;
2249 int dirty_flags;
2250 ram_addr = addr - (unsigned long)phys_ram_base;
2251 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2252 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2253 #if !defined(CONFIG_USER_ONLY)
2254 tb_invalidate_phys_page_fast(ram_addr, 4);
2255 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2256 #endif
2257 }
2258 stl_p((uint8_t *)(long)addr, val);
2259 #ifdef USE_KQEMU
2260 if (cpu_single_env->kqemu_enabled &&
2261 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2262 kqemu_modify_page(cpu_single_env, ram_addr);
2263 #endif
2264 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2265 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2266 /* we remove the notdirty callback only if the code has been
2267 flushed */
2268 if (dirty_flags == 0xff)
2269 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2270 }
2271
2272 static CPUReadMemoryFunc *error_mem_read[3] = {
2273 NULL, /* never used */
2274 NULL, /* never used */
2275 NULL, /* never used */
2276 };
2277
2278 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2279 notdirty_mem_writeb,
2280 notdirty_mem_writew,
2281 notdirty_mem_writel,
2282 };
2283
2284 #if defined(CONFIG_SOFTMMU)
2285 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2286 so these check for a hit then pass through to the normal out-of-line
2287 phys routines. */
2288 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2289 {
2290 return ldub_phys(addr);
2291 }
2292
2293 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2294 {
2295 return lduw_phys(addr);
2296 }
2297
2298 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2299 {
2300 return ldl_phys(addr);
2301 }
2302
2303 /* Generate a debug exception if a watchpoint has been hit.
2304 Returns the real physical address of the access. addr will be a host
2305 address in case of a RAM location. */
2306 static target_ulong check_watchpoint(target_phys_addr_t addr)
2307 {
2308 CPUState *env = cpu_single_env;
2309 target_ulong watch;
2310 target_ulong retaddr;
2311 int i;
2312
2313 retaddr = addr;
2314 for (i = 0; i < env->nb_watchpoints; i++) {
2315 watch = env->watchpoint[i].vaddr;
2316 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2317 retaddr = addr - env->watchpoint[i].addend;
2318 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2319 cpu_single_env->watchpoint_hit = i + 1;
2320 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2321 break;
2322 }
2323 }
2324 }
2325 return retaddr;
2326 }
2327
2328 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2329 uint32_t val)
2330 {
2331 addr = check_watchpoint(addr);
2332 stb_phys(addr, val);
2333 }
2334
2335 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2336 uint32_t val)
2337 {
2338 addr = check_watchpoint(addr);
2339 stw_phys(addr, val);
2340 }
2341
2342 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2343 uint32_t val)
2344 {
2345 addr = check_watchpoint(addr);
2346 stl_phys(addr, val);
2347 }
2348
2349 static CPUReadMemoryFunc *watch_mem_read[3] = {
2350 watch_mem_readb,
2351 watch_mem_readw,
2352 watch_mem_readl,
2353 };
2354
2355 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2356 watch_mem_writeb,
2357 watch_mem_writew,
2358 watch_mem_writel,
2359 };
2360 #endif
2361
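/* Subpage handling: when several memory regions share a single target page,
   the page is backed by a subpage_t whose tables dispatch each access, at
   sub-page granularity, to the read/write handlers of the region registered
   for that offset. */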
2362 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2363 unsigned int len)
2364 {
2365 uint32_t ret;
2366 unsigned int idx;
2367
2368 idx = SUBPAGE_IDX(addr - mmio->base);
2369 #if defined(DEBUG_SUBPAGE)
2370 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2371 mmio, len, addr, idx);
2372 #endif
2373 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2374
2375 return ret;
2376 }
2377
2378 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2379 uint32_t value, unsigned int len)
2380 {
2381 unsigned int idx;
2382
2383 idx = SUBPAGE_IDX(addr - mmio->base);
2384 #if defined(DEBUG_SUBPAGE)
2385 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2386 mmio, len, addr, idx, value);
2387 #endif
2388 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2389 }
2390
2391 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2392 {
2393 #if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395 #endif
2396
2397 return subpage_readlen(opaque, addr, 0);
2398 }
2399
2400 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2401 uint32_t value)
2402 {
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405 #endif
2406 subpage_writelen(opaque, addr, value, 0);
2407 }
2408
2409 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2410 {
2411 #if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2413 #endif
2414
2415 return subpage_readlen(opaque, addr, 1);
2416 }
2417
2418 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2419 uint32_t value)
2420 {
2421 #if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2423 #endif
2424 subpage_writelen(opaque, addr, value, 1);
2425 }
2426
2427 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2428 {
2429 #if defined(DEBUG_SUBPAGE)
2430 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2431 #endif
2432
2433 return subpage_readlen(opaque, addr, 2);
2434 }
2435
2436 static void subpage_writel (void *opaque,
2437 target_phys_addr_t addr, uint32_t value)
2438 {
2439 #if defined(DEBUG_SUBPAGE)
2440 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2441 #endif
2442 subpage_writelen(opaque, addr, value, 2);
2443 }
2444
2445 static CPUReadMemoryFunc *subpage_read[] = {
2446 &subpage_readb,
2447 &subpage_readw,
2448 &subpage_readl,
2449 };
2450
2451 static CPUWriteMemoryFunc *subpage_write[] = {
2452 &subpage_writeb,
2453 &subpage_writew,
2454 &subpage_writel,
2455 };
2456
2457 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2458 ram_addr_t memory)
2459 {
2460 int idx, eidx;
2461 unsigned int i;
2462
2463 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2464 return -1;
2465 idx = SUBPAGE_IDX(start);
2466 eidx = SUBPAGE_IDX(end);
2467 #if defined(DEBUG_SUBPAGE)
2468 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2469 mmio, start, end, idx, eidx, memory);
2470 #endif
2471 memory >>= IO_MEM_SHIFT;
2472 for (; idx <= eidx; idx++) {
2473 for (i = 0; i < 4; i++) {
2474 if (io_mem_read[memory][i]) {
2475 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2476 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2477 }
2478 if (io_mem_write[memory][i]) {
2479 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2480 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2481 }
2482 }
2483 }
2484
2485 return 0;
2486 }
2487
2488 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2489 ram_addr_t orig_memory)
2490 {
2491 subpage_t *mmio;
2492 int subpage_memory;
2493
2494 mmio = qemu_mallocz(sizeof(subpage_t));
2495 if (mmio != NULL) {
2496 mmio->base = base;
2497 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2498 #if defined(DEBUG_SUBPAGE)
2499 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2500 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2501 #endif
2502 *phys = subpage_memory | IO_MEM_SUBPAGE;
2503 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2504 }
2505
2506 return mmio;
2507 }
2508
2509 static void io_mem_init(void)
2510 {
2511 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2512 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2513 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2514 io_mem_nb = 5;
2515
2516 #if defined(CONFIG_SOFTMMU)
2517 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2518 watch_mem_write, NULL);
2519 #endif
2520 /* alloc dirty bits array */
2521 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2522 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2523 }
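
/* Note: io_mem_init() pre-registers the built-in ROM, unassigned and
   not-dirty handlers at fixed slots and reserves the first five io_mem
   entries; device models then obtain their own slots dynamically via
   cpu_register_io_memory(0, ...). */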
2524
2525 /* mem_read and mem_write are arrays of function pointers used to
2526 access bytes (index 0), words (index 1) and dwords (index 2).
2527 Functions can be omitted with a NULL function pointer. The
2528 registered functions may be modified dynamically later.
2529 If io_index is positive, the corresponding io zone is
2530 modified. If it is zero or negative, a new io zone is allocated. The
2531 return value can be used with cpu_register_physical_memory(); -1 is
2532 returned on error. */
2533 int cpu_register_io_memory(int io_index,
2534 CPUReadMemoryFunc **mem_read,
2535 CPUWriteMemoryFunc **mem_write,
2536 void *opaque)
2537 {
2538 int i, subwidth = 0;
2539
2540 if (io_index <= 0) {
2541 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2542 return -1;
2543 io_index = io_mem_nb++;
2544 } else {
2545 if (io_index >= IO_MEM_NB_ENTRIES)
2546 return -1;
2547 }
2548
2549 for(i = 0;i < 3; i++) {
2550 if (!mem_read[i] || !mem_write[i])
2551 subwidth = IO_MEM_SUBWIDTH;
2552 io_mem_read[io_index][i] = mem_read[i];
2553 io_mem_write[io_index][i] = mem_write[i];
2554 }
2555 io_mem_opaque[io_index] = opaque;
2556 return (io_index << IO_MEM_SHIFT) | subwidth;
2557 }
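
/* Note: if any access width is left NULL, the returned value carries
   IO_MEM_SUBWIDTH; cpu_register_physical_memory() uses that bit to force
   such regions onto the subpage path. */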
2558
2559 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2560 {
2561 return io_mem_write[io_index >> IO_MEM_SHIFT];
2562 }
2563
2564 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2565 {
2566 return io_mem_read[io_index >> IO_MEM_SHIFT];
2567 }
2568
2569 /* physical memory access (slow version, mainly for debug) */
2570 #if defined(CONFIG_USER_ONLY)
2571 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2572 int len, int is_write)
2573 {
2574 int l, flags;
2575 target_ulong page;
2576 void * p;
2577
2578 while (len > 0) {
2579 page = addr & TARGET_PAGE_MASK;
2580 l = (page + TARGET_PAGE_SIZE) - addr;
2581 if (l > len)
2582 l = len;
2583 flags = page_get_flags(page);
2584 if (!(flags & PAGE_VALID))
2585 return;
2586 if (is_write) {
2587 if (!(flags & PAGE_WRITE))
2588 return;
2589 /* XXX: this code should not depend on lock_user */
2590 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2591 /* FIXME - should this return an error rather than just fail? */
2592 return;
2593 memcpy(p, buf, l);
2594 unlock_user(p, addr, l);
2595 } else {
2596 if (!(flags & PAGE_READ))
2597 return;
2598 /* XXX: this code should not depend on lock_user */
2599 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2600 /* FIXME - should this return an error rather than just fail? */
2601 return;
2602 memcpy(buf, p, l);
2603 unlock_user(p, addr, 0);
2604 }
2605 len -= l;
2606 buf += l;
2607 addr += l;
2608 }
2609 }
2610
2611 #else
2612 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2613 int len, int is_write)
2614 {
2615 int l, io_index;
2616 uint8_t *ptr;
2617 uint32_t val;
2618 target_phys_addr_t page;
2619 unsigned long pd;
2620 PhysPageDesc *p;
2621
2622 while (len > 0) {
2623 page = addr & TARGET_PAGE_MASK;
2624 l = (page + TARGET_PAGE_SIZE) - addr;
2625 if (l > len)
2626 l = len;
2627 p = phys_page_find(page >> TARGET_PAGE_BITS);
2628 if (!p) {
2629 pd = IO_MEM_UNASSIGNED;
2630 } else {
2631 pd = p->phys_offset;
2632 }
2633
2634 if (is_write) {
2635 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2636 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2637 /* XXX: could force cpu_single_env to NULL to avoid
2638 potential bugs */
2639 if (l >= 4 && ((addr & 3) == 0)) {
2640 /* 32 bit write access */
2641 val = ldl_p(buf);
2642 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2643 l = 4;
2644 } else if (l >= 2 && ((addr & 1) == 0)) {
2645 /* 16 bit write access */
2646 val = lduw_p(buf);
2647 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2648 l = 2;
2649 } else {
2650 /* 8 bit write access */
2651 val = ldub_p(buf);
2652 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2653 l = 1;
2654 }
2655 } else {
2656 unsigned long addr1;
2657 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2658 /* RAM case */
2659 ptr = phys_ram_base + addr1;
2660 memcpy(ptr, buf, l);
2661 if (!cpu_physical_memory_is_dirty(addr1)) {
2662 /* invalidate code */
2663 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2664 /* set dirty bit */
2665 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2666 (0xff & ~CODE_DIRTY_FLAG);
2667 }
2668 }
2669 } else {
2670 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2671 !(pd & IO_MEM_ROMD)) {
2672 /* I/O case */
2673 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2674 if (l >= 4 && ((addr & 3) == 0)) {
2675 /* 32 bit read access */
2676 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2677 stl_p(buf, val);
2678 l = 4;
2679 } else if (l >= 2 && ((addr & 1) == 0)) {
2680 /* 16 bit read access */
2681 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2682 stw_p(buf, val);
2683 l = 2;
2684 } else {
2685 /* 8 bit read access */
2686 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2687 stb_p(buf, val);
2688 l = 1;
2689 }
2690 } else {
2691 /* RAM case */
2692 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2693 (addr & ~TARGET_PAGE_MASK);
2694 memcpy(buf, ptr, l);
2695 }
2696 }
2697 len -= l;
2698 buf += l;
2699 addr += l;
2700 }
2701 }
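
/* Note: cpu_physical_memory_rw() splits the transfer at target page
   boundaries; RAM pages are copied directly through phys_ram_base (with
   TB invalidation and dirty-bit updates on writes), while I/O pages are
   accessed through the registered handlers using the widest aligned
   access size available (4, 2 or 1 bytes). */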
2702
2703 /* used for ROM loading: can write to both RAM and ROM */
2704 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2705 const uint8_t *buf, int len)
2706 {
2707 int l;
2708 uint8_t *ptr;
2709 target_phys_addr_t page;
2710 unsigned long pd;
2711 PhysPageDesc *p;
2712
2713 while (len > 0) {
2714 page = addr & TARGET_PAGE_MASK;
2715 l = (page + TARGET_PAGE_SIZE) - addr;
2716 if (l > len)
2717 l = len;
2718 p = phys_page_find(page >> TARGET_PAGE_BITS);
2719 if (!p) {
2720 pd = IO_MEM_UNASSIGNED;
2721 } else {
2722 pd = p->phys_offset;
2723 }
2724
2725 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2726 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2727 !(pd & IO_MEM_ROMD)) {
2728 /* do nothing */
2729 } else {
2730 unsigned long addr1;
2731 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2732 /* ROM/RAM case */
2733 ptr = phys_ram_base + addr1;
2734 memcpy(ptr, buf, l);
2735 }
2736 len -= l;
2737 buf += l;
2738 addr += l;
2739 }
2740 }
2741
2742
2743 /* warning: addr must be aligned */
2744 uint32_t ldl_phys(target_phys_addr_t addr)
2745 {
2746 int io_index;
2747 uint8_t *ptr;
2748 uint32_t val;
2749 unsigned long pd;
2750 PhysPageDesc *p;
2751
2752 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2753 if (!p) {
2754 pd = IO_MEM_UNASSIGNED;
2755 } else {
2756 pd = p->phys_offset;
2757 }
2758
2759 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2760 !(pd & IO_MEM_ROMD)) {
2761 /* I/O case */
2762 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2763 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2764 } else {
2765 /* RAM case */
2766 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2767 (addr & ~TARGET_PAGE_MASK);
2768 val = ldl_p(ptr);
2769 }
2770 return val;
2771 }
2772
2773 /* warning: addr must be aligned */
2774 uint64_t ldq_phys(target_phys_addr_t addr)
2775 {
2776 int io_index;
2777 uint8_t *ptr;
2778 uint64_t val;
2779 unsigned long pd;
2780 PhysPageDesc *p;
2781
2782 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2783 if (!p) {
2784 pd = IO_MEM_UNASSIGNED;
2785 } else {
2786 pd = p->phys_offset;
2787 }
2788
2789 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2790 !(pd & IO_MEM_ROMD)) {
2791 /* I/O case */
2792 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2793 #ifdef TARGET_WORDS_BIGENDIAN
2794 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2795 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2796 #else
2797 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2798 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2799 #endif
2800 } else {
2801 /* RAM case */
2802 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2803 (addr & ~TARGET_PAGE_MASK);
2804 val = ldq_p(ptr);
2805 }
2806 return val;
2807 }
2808
2809 /* XXX: optimize */
2810 uint32_t ldub_phys(target_phys_addr_t addr)
2811 {
2812 uint8_t val;
2813 cpu_physical_memory_read(addr, &val, 1);
2814 return val;
2815 }
2816
2817 /* XXX: optimize */
2818 uint32_t lduw_phys(target_phys_addr_t addr)
2819 {
2820 uint16_t val;
2821 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2822 return tswap16(val);
2823 }
2824
2825 /* warning: addr must be aligned. The ram page is not marked as dirty
2826 and the code inside is not invalidated. It is useful if the dirty
2827 bits are used to track modified PTEs */
2828 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2829 {
2830 int io_index;
2831 uint8_t *ptr;
2832 unsigned long pd;
2833 PhysPageDesc *p;
2834
2835 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2836 if (!p) {
2837 pd = IO_MEM_UNASSIGNED;
2838 } else {
2839 pd = p->phys_offset;
2840 }
2841
2842 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2843 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2844 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2845 } else {
2846 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2847 (addr & ~TARGET_PAGE_MASK);
2848 stl_p(ptr, val);
2849 }
2850 }
2851
2852 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2853 {
2854 int io_index;
2855 uint8_t *ptr;
2856 unsigned long pd;
2857 PhysPageDesc *p;
2858
2859 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2860 if (!p) {
2861 pd = IO_MEM_UNASSIGNED;
2862 } else {
2863 pd = p->phys_offset;
2864 }
2865
2866 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2867 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2868 #ifdef TARGET_WORDS_BIGENDIAN
2869 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2870 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2871 #else
2872 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2873 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2874 #endif
2875 } else {
2876 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2877 (addr & ~TARGET_PAGE_MASK);
2878 stq_p(ptr, val);
2879 }
2880 }
2881
2882 /* warning: addr must be aligned */
2883 void stl_phys(target_phys_addr_t addr, uint32_t val)
2884 {
2885 int io_index;
2886 uint8_t *ptr;
2887 unsigned long pd;
2888 PhysPageDesc *p;
2889
2890 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2891 if (!p) {
2892 pd = IO_MEM_UNASSIGNED;
2893 } else {
2894 pd = p->phys_offset;
2895 }
2896
2897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2899 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2900 } else {
2901 unsigned long addr1;
2902 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2903 /* RAM case */
2904 ptr = phys_ram_base + addr1;
2905 stl_p(ptr, val);
2906 if (!cpu_physical_memory_is_dirty(addr1)) {
2907 /* invalidate code */
2908 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2909 /* set dirty bit */
2910 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2911 (0xff & ~CODE_DIRTY_FLAG);
2912 }
2913 }
2914 }
2915
2916 /* XXX: optimize */
2917 void stb_phys(target_phys_addr_t addr, uint32_t val)
2918 {
2919 uint8_t v = val;
2920 cpu_physical_memory_write(addr, &v, 1);
2921 }
2922
2923 /* XXX: optimize */
2924 void stw_phys(target_phys_addr_t addr, uint32_t val)
2925 {
2926 uint16_t v = tswap16(val);
2927 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2928 }
2929
2930 /* XXX: optimize */
2931 void stq_phys(target_phys_addr_t addr, uint64_t val)
2932 {
2933 val = tswap64(val);
2934 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2935 }
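
/* Note: the byte, word and 64-bit store helpers above simply go through
   cpu_physical_memory_rw(), whereas ldl/ldq/stl (and the _notdirty
   variants) access RAM directly and only use the io_mem handlers for
   I/O pages. */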
2936
2937 #endif
2938
2939 /* virtual memory access for debug */
2940 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2941 uint8_t *buf, int len, int is_write)
2942 {
2943 int l;
2944 target_phys_addr_t phys_addr;
2945 target_ulong page;
2946
2947 while (len > 0) {
2948 page = addr & TARGET_PAGE_MASK;
2949 phys_addr = cpu_get_phys_page_debug(env, page);
2950 /* if no physical page is mapped, return an error */
2951 if (phys_addr == -1)
2952 return -1;
2953 l = (page + TARGET_PAGE_SIZE) - addr;
2954 if (l > len)
2955 l = len;
2956 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2957 buf, l, is_write);
2958 len -= l;
2959 buf += l;
2960 addr += l;
2961 }
2962 return 0;
2963 }
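
/* Note: cpu_memory_rw_debug() is the debugger-facing access path (used,
   for example, by the gdb stub and the monitor); it translates one guest
   virtual page at a time via cpu_get_phys_page_debug() and then defers to
   cpu_physical_memory_rw(). */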
2964
2965 void dump_exec_info(FILE *f,
2966 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2967 {
2968 int i, target_code_size, max_target_code_size;
2969 int direct_jmp_count, direct_jmp2_count, cross_page;
2970 TranslationBlock *tb;
2971
2972 target_code_size = 0;
2973 max_target_code_size = 0;
2974 cross_page = 0;
2975 direct_jmp_count = 0;
2976 direct_jmp2_count = 0;
2977 for(i = 0; i < nb_tbs; i++) {
2978 tb = &tbs[i];
2979 target_code_size += tb->size;
2980 if (tb->size > max_target_code_size)
2981 max_target_code_size = tb->size;
2982 if (tb->page_addr[1] != -1)
2983 cross_page++;
2984 if (tb->tb_next_offset[0] != 0xffff) {
2985 direct_jmp_count++;
2986 if (tb->tb_next_offset[1] != 0xffff) {
2987 direct_jmp2_count++;
2988 }
2989 }
2990 }
2991 /* XXX: avoid using doubles ? */
2992 cpu_fprintf(f, "Translation buffer state:\n");
2993 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2994 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2995 nb_tbs ? target_code_size / nb_tbs : 0,
2996 max_target_code_size);
2997 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2998 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2999 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3000 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3001 cross_page,
3002 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3003 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3004 direct_jmp_count,
3005 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3006 direct_jmp2_count,
3007 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3008 cpu_fprintf(f, "\nStatistics:\n");
3009 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3010 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3011 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3012 tcg_dump_info(f, cpu_fprintf);
3013 }
3014
3015 #if !defined(CONFIG_USER_ONLY)
3016
3017 #define MMUSUFFIX _cmmu
3018 #define GETPC() NULL
3019 #define env cpu_single_env
3020 #define SOFTMMU_CODE_ACCESS
3021
3022 #define SHIFT 0
3023 #include "softmmu_template.h"
3024
3025 #define SHIFT 1
3026 #include "softmmu_template.h"
3027
3028 #define SHIFT 2
3029 #include "softmmu_template.h"
3030
3031 #define SHIFT 3
3032 #include "softmmu_template.h"
3033
3034 #undef env
3035
3036 #endif