[mirror_qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #endif
42
43 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_FLUSH
45 //#define DEBUG_TLB
46 //#define DEBUG_UNASSIGNED
47
48 /* make various TB consistency checks */
49 //#define DEBUG_TB_CHECK
50 //#define DEBUG_TLB_CHECK
51
52 //#define DEBUG_IOPORT
53 //#define DEBUG_SUBPAGE
54
55 #if !defined(CONFIG_USER_ONLY)
56 /* TB consistency checks only implemented for usermode emulation. */
57 #undef DEBUG_TB_CHECK
58 #endif
59
60 /* threshold to flush the translated code buffer */
61 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62
63 #define SMC_BITMAP_USE_THRESHOLD 10
64
65 #define MMAP_AREA_START 0x00000000
66 #define MMAP_AREA_END 0xa8000000
67
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 40
79 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 #else
82 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 #endif
85
86 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91
92 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
93 uint8_t *code_gen_ptr;
94
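/* Guest RAM state: phys_ram_base points at a single host allocation backing
   all guest RAM, phys_ram_alloc_offset tracks how much of that allocation has
   already been handed out, and phys_ram_dirty holds one byte of dirty flags
   per target page (the CODE_DIRTY_FLAG bit is used below to detect writes to
   pages containing translated code). */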
95 ram_addr_t phys_ram_size;
96 int phys_ram_fd;
97 uint8_t *phys_ram_base;
98 uint8_t *phys_ram_dirty;
99 static ram_addr_t phys_ram_alloc_offset = 0;
100
101 CPUState *first_cpu;
102 /* current CPU in the current thread. It is only valid inside
103 cpu_exec() */
104 CPUState *cpu_single_env;
105
106 typedef struct PageDesc {
107 /* list of TBs intersecting this ram page */
108 TranslationBlock *first_tb;
109     /* in order to optimize self modifying code handling, we count the
110        number of write accesses to a given page; past a threshold a bitmap is used */
111 unsigned int code_write_count;
112 uint8_t *code_bitmap;
113 #if defined(CONFIG_USER_ONLY)
114 unsigned long flags;
115 #endif
116 } PageDesc;
117
118 typedef struct PhysPageDesc {
119 /* offset in host memory of the page + io_index in the low 12 bits */
120 ram_addr_t phys_offset;
121 } PhysPageDesc;
122
123 #define L2_BITS 10
124 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
125 /* XXX: this is a temporary hack for alpha target.
126 * In the future, this is to be replaced by a multi-level table
127  * to actually be able to handle the complete 64-bit address space.
128 */
129 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
130 #else
131 #define L1_BITS (TARGET_PHYS_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
132 #endif
133
134 #undef L1_BITS
135 #undef L2_BITS
136 #define L1_BITS 13
137 #define L2_BITS 13
138
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
141
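/* Page descriptors are kept in a two-level table: for a given page index,
   'index >> L2_BITS' selects the L1 slot (an L2_SIZE array allocated on
   demand) and 'index & (L2_SIZE - 1)' selects the entry within it, as done in
   page_find_alloc() and phys_page_find_alloc() below. Note that the fixed
   L1_BITS/L2_BITS = 13 definitions above override the values computed from
   the address space size. */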
142 static void io_mem_init(void);
143
144 unsigned long qemu_real_host_page_size;
145 unsigned long qemu_host_page_bits;
146 unsigned long qemu_host_page_size;
147 unsigned long qemu_host_page_mask;
148
149 /* XXX: for system emulation, it could just be an array */
150 static PageDesc *l1_map[L1_SIZE];
151 PhysPageDesc **l1_phys_map;
152
153 /* io memory support */
154 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
155 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
156 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
157 static int io_mem_nb;
158 #if defined(CONFIG_SOFTMMU)
159 static int io_mem_watch;
160 #endif
161
162 /* log support */
163 char *logfilename = "/tmp/qemu.log";
164 FILE *logfile;
165 int loglevel;
166 static int log_append = 0;
167
168 /* statistics */
169 static int tlb_flush_count;
170 static int tb_flush_count;
171 static int tb_phys_invalidate_count;
172
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
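/* A subpage_t splits a single target page between several memory regions:
   for each byte offset within the page and each access-size index it records
   the read/write handlers and opaque pointers to dispatch to, mirroring the
   global io_mem_read/io_mem_write tables (see subpage_register below). */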
174 typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179 } subpage_t;
180
181 static void page_init(void)
182 {
183 /* NOTE: we can always suppose that qemu_host_page_size >=
184 TARGET_PAGE_SIZE */
185 #ifdef _WIN32
186 {
187 SYSTEM_INFO system_info;
188 DWORD old_protect;
189
190 GetSystemInfo(&system_info);
191 qemu_real_host_page_size = system_info.dwPageSize;
192
193 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
194 PAGE_EXECUTE_READWRITE, &old_protect);
195 }
196 #else
197 qemu_real_host_page_size = getpagesize();
198 {
199 unsigned long start, end;
200
201 start = (unsigned long)code_gen_buffer;
202 start &= ~(qemu_real_host_page_size - 1);
203
204 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
205 end += qemu_real_host_page_size - 1;
206 end &= ~(qemu_real_host_page_size - 1);
207
208 mprotect((void *)start, end - start,
209 PROT_READ | PROT_WRITE | PROT_EXEC);
210 }
211 #endif
212
213 if (qemu_host_page_size == 0)
214 qemu_host_page_size = qemu_real_host_page_size;
215 if (qemu_host_page_size < TARGET_PAGE_SIZE)
216 qemu_host_page_size = TARGET_PAGE_SIZE;
217 qemu_host_page_bits = 0;
218 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
219 qemu_host_page_bits++;
220 qemu_host_page_mask = ~(qemu_host_page_size - 1);
221 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
222 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
223
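/* In user-mode emulation, mark every region already mapped into the host
   process (as listed in /proc/self/maps) as PAGE_RESERVED so that guest
   mappings are not placed on top of it. */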
224 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
225 {
226 long long startaddr, endaddr;
227 FILE *f;
228 int n;
229
230 f = fopen("/proc/self/maps", "r");
231 if (f) {
232 do {
233 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
234 if (n == 2) {
235 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
236 TARGET_PAGE_ALIGN(endaddr),
237 PAGE_RESERVED);
238 }
239 } while (!feof(f));
240 fclose(f);
241 }
242 }
243 #endif
244 }
245
246 static inline PageDesc *page_find_alloc(target_ulong index)
247 {
248 PageDesc **lp, *p;
249
250 lp = &l1_map[index >> L2_BITS];
251 p = *lp;
252 if (!p) {
253 /* allocate if not found */
254 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
255 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
256 *lp = p;
257 }
258 return p + (index & (L2_SIZE - 1));
259 }
260
261 static inline PageDesc *page_find(target_ulong index)
262 {
263 PageDesc *p;
264
265 p = l1_map[index >> L2_BITS];
266 if (!p)
267 return 0;
268 return p + (index & (L2_SIZE - 1));
269 }
270
271 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
272 {
273 void **lp, **p;
274 PhysPageDesc *pd;
275
276 p = (void **)l1_phys_map;
277 #if 0
278 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
279
280 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
281 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
282 #endif
283 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
284 p = *lp;
285 if (!p) {
286 /* allocate if not found */
287 if (!alloc)
288 return NULL;
289 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
290 memset(p, 0, sizeof(void *) * L1_SIZE);
291 *lp = p;
292 }
293 #endif
294 #endif
295 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
296 pd = *lp;
297 if (!pd) {
298 int i;
299 /* allocate if not found */
300 if (!alloc)
301 return NULL;
302 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
303 *lp = pd;
304 for (i = 0; i < L2_SIZE; i++)
305 pd[i].phys_offset = IO_MEM_UNASSIGNED;
306 }
307 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
308 }
309
310 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
311 {
312 return phys_page_find_alloc(index, 0);
313 }
314
315 #if !defined(CONFIG_USER_ONLY)
316 static void tlb_protect_code(ram_addr_t ram_addr);
317 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
318 target_ulong vaddr);
319 #endif
320
321 void cpu_exec_init(CPUState *env)
322 {
323 CPUState **penv;
324 int cpu_index;
325
326 if (!code_gen_ptr) {
327 cpu_gen_init();
328 code_gen_ptr = code_gen_buffer;
329 page_init();
330 io_mem_init();
331 }
332 env->next_cpu = NULL;
333 penv = &first_cpu;
334 cpu_index = 0;
335 while (*penv != NULL) {
336 penv = (CPUState **)&(*penv)->next_cpu;
337 cpu_index++;
338 }
339 env->cpu_index = cpu_index;
340 env->nb_watchpoints = 0;
341 *penv = env;
342 }
343
344 static inline void invalidate_page_bitmap(PageDesc *p)
345 {
346 if (p->code_bitmap) {
347 qemu_free(p->code_bitmap);
348 p->code_bitmap = NULL;
349 }
350 p->code_write_count = 0;
351 }
352
353 /* set to NULL all the 'first_tb' fields in all PageDescs */
354 static void page_flush_tb(void)
355 {
356 int i, j;
357 PageDesc *p;
358
359 for(i = 0; i < L1_SIZE; i++) {
360 p = l1_map[i];
361 if (p) {
362 for(j = 0; j < L2_SIZE; j++) {
363 p->first_tb = NULL;
364 invalidate_page_bitmap(p);
365 p++;
366 }
367 }
368 }
369 }
370
371 /* flush all the translation blocks */
372 /* XXX: tb_flush is currently not thread safe */
373 void tb_flush(CPUState *env1)
374 {
375 CPUState *env;
376 #if defined(DEBUG_FLUSH)
377 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
378 (unsigned long)(code_gen_ptr - code_gen_buffer),
379 nb_tbs, nb_tbs > 0 ?
380 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
381 #endif
382 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
383 cpu_abort(env1, "Internal error: code buffer overflow\n");
384
385 nb_tbs = 0;
386
387 for(env = first_cpu; env != NULL; env = env->next_cpu) {
388 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
389 }
390
391 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
392 page_flush_tb();
393
394 code_gen_ptr = code_gen_buffer;
395 /* XXX: flush processor icache at this point if cache flush is
396 expensive */
397 tb_flush_count++;
398 }
399
400 #ifdef DEBUG_TB_CHECK
401
402 static void tb_invalidate_check(target_ulong address)
403 {
404 TranslationBlock *tb;
405 int i;
406 address &= TARGET_PAGE_MASK;
407 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
408 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
409 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
410 address >= tb->pc + tb->size)) {
411 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
412 address, (long)tb->pc, tb->size);
413 }
414 }
415 }
416 }
417
418 /* verify that all the pages have correct rights for code */
419 static void tb_page_check(void)
420 {
421 TranslationBlock *tb;
422 int i, flags1, flags2;
423
424 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
425 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
426 flags1 = page_get_flags(tb->pc);
427 flags2 = page_get_flags(tb->pc + tb->size - 1);
428 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
429 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
430 (long)tb->pc, tb->size, flags1, flags2);
431 }
432 }
433 }
434 }
435
436 void tb_jmp_check(TranslationBlock *tb)
437 {
438 TranslationBlock *tb1;
439 unsigned int n1;
440
441     /* walk the circular list of jumps to this TB */
442 tb1 = tb->jmp_first;
443 for(;;) {
444 n1 = (long)tb1 & 3;
445 tb1 = (TranslationBlock *)((long)tb1 & ~3);
446 if (n1 == 2)
447 break;
448 tb1 = tb1->jmp_next[n1];
449 }
450 /* check end of list */
451 if (tb1 != tb) {
452 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
453 }
454 }
455
456 #endif
457
458 /* invalidate one TB */
459 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
460 int next_offset)
461 {
462 TranslationBlock *tb1;
463 for(;;) {
464 tb1 = *ptb;
465 if (tb1 == tb) {
466 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
467 break;
468 }
469 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
470 }
471 }
472
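/* TB lists use tagged pointers: the low two bits of a TranslationBlock
   pointer stored in first_tb/page_next[] give the page slot (0 or 1) that the
   pointed-to TB occupies on that page, and in jmp_first/jmp_next[] they give
   the jump slot, with the value 2 marking the end of the circular jump list.
   The tag is read with '& 3' and stripped with '& ~3' before the pointer is
   dereferenced. */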
473 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
474 {
475 TranslationBlock *tb1;
476 unsigned int n1;
477
478 for(;;) {
479 tb1 = *ptb;
480 n1 = (long)tb1 & 3;
481 tb1 = (TranslationBlock *)((long)tb1 & ~3);
482 if (tb1 == tb) {
483 *ptb = tb1->page_next[n1];
484 break;
485 }
486 ptb = &tb1->page_next[n1];
487 }
488 }
489
490 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
491 {
492 TranslationBlock *tb1, **ptb;
493 unsigned int n1;
494
495 ptb = &tb->jmp_next[n];
496 tb1 = *ptb;
497 if (tb1) {
498 /* find tb(n) in circular list */
499 for(;;) {
500 tb1 = *ptb;
501 n1 = (long)tb1 & 3;
502 tb1 = (TranslationBlock *)((long)tb1 & ~3);
503 if (n1 == n && tb1 == tb)
504 break;
505 if (n1 == 2) {
506 ptb = &tb1->jmp_first;
507 } else {
508 ptb = &tb1->jmp_next[n1];
509 }
510 }
511 /* now we can suppress tb(n) from the list */
512 *ptb = tb->jmp_next[n];
513
514 tb->jmp_next[n] = NULL;
515 }
516 }
517
518 /* reset the jump entry 'n' of a TB so that it is not chained to
519 another TB */
520 static inline void tb_reset_jump(TranslationBlock *tb, int n)
521 {
522 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
523 }
524
525 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
526 {
527 CPUState *env;
528 PageDesc *p;
529 unsigned int h, n1;
530 target_phys_addr_t phys_pc;
531 TranslationBlock *tb1, *tb2;
532
533 /* remove the TB from the hash list */
534 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
535 h = tb_phys_hash_func(phys_pc);
536 tb_remove(&tb_phys_hash[h], tb,
537 offsetof(TranslationBlock, phys_hash_next));
538
539 /* remove the TB from the page list */
540 if (tb->page_addr[0] != page_addr) {
541 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
542 tb_page_remove(&p->first_tb, tb);
543 invalidate_page_bitmap(p);
544 }
545 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
546 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
547 tb_page_remove(&p->first_tb, tb);
548 invalidate_page_bitmap(p);
549 }
550
551 tb_invalidated_flag = 1;
552
553 /* remove the TB from the hash list */
554 h = tb_jmp_cache_hash_func(tb->pc);
555 for(env = first_cpu; env != NULL; env = env->next_cpu) {
556 if (env->tb_jmp_cache[h] == tb)
557 env->tb_jmp_cache[h] = NULL;
558 }
559
560 /* suppress this TB from the two jump lists */
561 tb_jmp_remove(tb, 0);
562 tb_jmp_remove(tb, 1);
563
564 /* suppress any remaining jumps to this TB */
565 tb1 = tb->jmp_first;
566 for(;;) {
567 n1 = (long)tb1 & 3;
568 if (n1 == 2)
569 break;
570 tb1 = (TranslationBlock *)((long)tb1 & ~3);
571 tb2 = tb1->jmp_next[n1];
572 tb_reset_jump(tb1, n1);
573 tb1->jmp_next[n1] = NULL;
574 tb1 = tb2;
575 }
576 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
577
578 tb_phys_invalidate_count++;
579 }
580
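/* Set bits [start, start + len) in the bitmap 'tab'. For example,
   set_bits(bitmap, 10, 4) ORs 0x3c into bitmap[1], marking bits 10..13. */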
581 static inline void set_bits(uint8_t *tab, int start, int len)
582 {
583 int end, mask, end1;
584
585 end = start + len;
586 tab += start >> 3;
587 mask = 0xff << (start & 7);
588 if ((start & ~7) == (end & ~7)) {
589 if (start < end) {
590 mask &= ~(0xff << (end & 7));
591 *tab |= mask;
592 }
593 } else {
594 *tab++ |= mask;
595 start = (start + 8) & ~7;
596 end1 = end & ~7;
597 while (start < end1) {
598 *tab++ = 0xff;
599 start += 8;
600 }
601 if (start < end) {
602 mask = ~(0xff << (end & 7));
603 *tab |= mask;
604 }
605 }
606 }
607
608 static void build_page_bitmap(PageDesc *p)
609 {
610 int n, tb_start, tb_end;
611 TranslationBlock *tb;
612
613 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
614 if (!p->code_bitmap)
615 return;
616 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
617
618 tb = p->first_tb;
619 while (tb != NULL) {
620 n = (long)tb & 3;
621 tb = (TranslationBlock *)((long)tb & ~3);
622 /* NOTE: this is subtle as a TB may span two physical pages */
623 if (n == 0) {
624 /* NOTE: tb_end may be after the end of the page, but
625 it is not a problem */
626 tb_start = tb->pc & ~TARGET_PAGE_MASK;
627 tb_end = tb_start + tb->size;
628 if (tb_end > TARGET_PAGE_SIZE)
629 tb_end = TARGET_PAGE_SIZE;
630 } else {
631 tb_start = 0;
632 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
633 }
634 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
635 tb = tb->page_next[n];
636 }
637 }
638
639 #ifdef TARGET_HAS_PRECISE_SMC
640
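/* Translate and register a single TB for (pc, cs_base, flags). This is used
   by the precise self-modifying-code path below to regenerate, with
   CF_SINGLE_INSN, the block whose store triggered the invalidation. */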
641 static void tb_gen_code(CPUState *env,
642 target_ulong pc, target_ulong cs_base, int flags,
643 int cflags)
644 {
645 TranslationBlock *tb;
646 uint8_t *tc_ptr;
647 target_ulong phys_pc, phys_page2, virt_page2;
648 int code_gen_size;
649
650 phys_pc = get_phys_addr_code(env, pc);
651 tb = tb_alloc(pc);
652 if (!tb) {
653 /* flush must be done */
654 tb_flush(env);
655 /* cannot fail at this point */
656 tb = tb_alloc(pc);
657 }
658 tc_ptr = code_gen_ptr;
659 tb->tc_ptr = tc_ptr;
660 tb->cs_base = cs_base;
661 tb->flags = flags;
662 tb->cflags = cflags;
663 cpu_gen_code(env, tb, &code_gen_size);
664 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
665
666 /* check next page if needed */
667 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
668 phys_page2 = -1;
669 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
670 phys_page2 = get_phys_addr_code(env, virt_page2);
671 }
672 tb_link_phys(tb, phys_pc, phys_page2);
673 }
674 #endif
675
676 /* invalidate all TBs which intersect with the target physical range
677    [start, end[. NOTE: start and end must refer to the same physical page.
678    'is_cpu_write_access' should be true if called from a real cpu write
679    access: the virtual CPU will exit the current TB if code is modified
680    inside this TB. */
681 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
682 int is_cpu_write_access)
683 {
684 int n, current_tb_modified, current_tb_not_found, current_flags;
685 CPUState *env = cpu_single_env;
686 PageDesc *p;
687 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
688 target_ulong tb_start, tb_end;
689 target_ulong current_pc, current_cs_base;
690
691 p = page_find(start >> TARGET_PAGE_BITS);
692 if (!p)
693 return;
694 if (!p->code_bitmap &&
695 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
696 is_cpu_write_access) {
697 /* build code bitmap */
698 build_page_bitmap(p);
699 }
700
701 /* we remove all the TBs in the range [start, end[ */
702 /* XXX: see if in some cases it could be faster to invalidate all the code */
703 current_tb_not_found = is_cpu_write_access;
704 current_tb_modified = 0;
705 current_tb = NULL; /* avoid warning */
706 current_pc = 0; /* avoid warning */
707 current_cs_base = 0; /* avoid warning */
708 current_flags = 0; /* avoid warning */
709 tb = p->first_tb;
710 while (tb != NULL) {
711 n = (long)tb & 3;
712 tb = (TranslationBlock *)((long)tb & ~3);
713 tb_next = tb->page_next[n];
714 /* NOTE: this is subtle as a TB may span two physical pages */
715 if (n == 0) {
716 /* NOTE: tb_end may be after the end of the page, but
717 it is not a problem */
718 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
719 tb_end = tb_start + tb->size;
720 } else {
721 tb_start = tb->page_addr[1];
722 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
723 }
724 if (!(tb_end <= start || tb_start >= end)) {
725 #ifdef TARGET_HAS_PRECISE_SMC
726 if (current_tb_not_found) {
727 current_tb_not_found = 0;
728 current_tb = NULL;
729 if (env->mem_write_pc) {
730 /* now we have a real cpu fault */
731 current_tb = tb_find_pc(env->mem_write_pc);
732 }
733 }
734 if (current_tb == tb &&
735 !(current_tb->cflags & CF_SINGLE_INSN)) {
736 /* If we are modifying the current TB, we must stop
737 its execution. We could be more precise by checking
738 that the modification is after the current PC, but it
739 would require a specialized function to partially
740 restore the CPU state */
741
742 current_tb_modified = 1;
743 cpu_restore_state(current_tb, env,
744 env->mem_write_pc, NULL);
745 #if defined(TARGET_I386)
746 current_flags = env->hflags;
747 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
748 current_cs_base = (target_ulong)env->segs[R_CS].base;
749 current_pc = current_cs_base + env->eip;
750 #else
751 #error unsupported CPU
752 #endif
753 }
754 #endif /* TARGET_HAS_PRECISE_SMC */
755 /* we need to do that to handle the case where a signal
756 occurs while doing tb_phys_invalidate() */
757 saved_tb = NULL;
758 if (env) {
759 saved_tb = env->current_tb;
760 env->current_tb = NULL;
761 }
762 tb_phys_invalidate(tb, -1);
763 if (env) {
764 env->current_tb = saved_tb;
765 if (env->interrupt_request && env->current_tb)
766 cpu_interrupt(env, env->interrupt_request);
767 }
768 }
769 tb = tb_next;
770 }
771 #if !defined(CONFIG_USER_ONLY)
772 /* if no code remaining, no need to continue to use slow writes */
773 if (!p->first_tb) {
774 invalidate_page_bitmap(p);
775 if (is_cpu_write_access) {
776 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
777 }
778 }
779 #endif
780 #ifdef TARGET_HAS_PRECISE_SMC
781 if (current_tb_modified) {
782 /* we generate a block containing just the instruction
783 modifying the memory. It will ensure that it cannot modify
784 itself */
785 env->current_tb = NULL;
786 tb_gen_code(env, current_pc, current_cs_base, current_flags,
787 CF_SINGLE_INSN);
788 cpu_resume_from_signal(env, NULL);
789 }
790 #endif
791 }
792
793 /* len must be <= 8 and start must be a multiple of len */
794 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
795 {
796 PageDesc *p;
797 int offset, b;
798 #if 0
799 if (1) {
800 if (loglevel) {
801 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
802 cpu_single_env->mem_write_vaddr, len,
803 cpu_single_env->eip,
804 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
805 }
806 }
807 #endif
808 p = page_find(start >> TARGET_PAGE_BITS);
809 if (!p)
810 return;
811 if (p->code_bitmap) {
812 offset = start & ~TARGET_PAGE_MASK;
813 b = p->code_bitmap[offset >> 3] >> (offset & 7);
814 if (b & ((1 << len) - 1))
815 goto do_invalidate;
816 } else {
817 do_invalidate:
818 tb_invalidate_phys_page_range(start, start + len, 1);
819 }
820 }
821
822 #if !defined(CONFIG_SOFTMMU)
823 static void tb_invalidate_phys_page(target_phys_addr_t addr,
824 unsigned long pc, void *puc)
825 {
826 int n, current_flags, current_tb_modified;
827 target_ulong current_pc, current_cs_base;
828 PageDesc *p;
829 TranslationBlock *tb, *current_tb;
830 #ifdef TARGET_HAS_PRECISE_SMC
831 CPUState *env = cpu_single_env;
832 #endif
833
834 addr &= TARGET_PAGE_MASK;
835 p = page_find(addr >> TARGET_PAGE_BITS);
836 if (!p)
837 return;
838 tb = p->first_tb;
839 current_tb_modified = 0;
840 current_tb = NULL;
841 current_pc = 0; /* avoid warning */
842 current_cs_base = 0; /* avoid warning */
843 current_flags = 0; /* avoid warning */
844 #ifdef TARGET_HAS_PRECISE_SMC
845 if (tb && pc != 0) {
846 current_tb = tb_find_pc(pc);
847 }
848 #endif
849 while (tb != NULL) {
850 n = (long)tb & 3;
851 tb = (TranslationBlock *)((long)tb & ~3);
852 #ifdef TARGET_HAS_PRECISE_SMC
853 if (current_tb == tb &&
854 !(current_tb->cflags & CF_SINGLE_INSN)) {
855 /* If we are modifying the current TB, we must stop
856 its execution. We could be more precise by checking
857 that the modification is after the current PC, but it
858 would require a specialized function to partially
859 restore the CPU state */
860
861 current_tb_modified = 1;
862 cpu_restore_state(current_tb, env, pc, puc);
863 #if defined(TARGET_I386)
864 current_flags = env->hflags;
865 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
866 current_cs_base = (target_ulong)env->segs[R_CS].base;
867 current_pc = current_cs_base + env->eip;
868 #else
869 #error unsupported CPU
870 #endif
871 }
872 #endif /* TARGET_HAS_PRECISE_SMC */
873 tb_phys_invalidate(tb, addr);
874 tb = tb->page_next[n];
875 }
876 p->first_tb = NULL;
877 #ifdef TARGET_HAS_PRECISE_SMC
878 if (current_tb_modified) {
879 /* we generate a block containing just the instruction
880 modifying the memory. It will ensure that it cannot modify
881 itself */
882 env->current_tb = NULL;
883 tb_gen_code(env, current_pc, current_cs_base, current_flags,
884 CF_SINGLE_INSN);
885 cpu_resume_from_signal(env, puc);
886 }
887 #endif
888 }
889 #endif
890
891 /* add the tb in the target page and protect it if necessary */
892 static inline void tb_alloc_page(TranslationBlock *tb,
893 unsigned int n, target_ulong page_addr)
894 {
895 PageDesc *p;
896 TranslationBlock *last_first_tb;
897
898 tb->page_addr[n] = page_addr;
899 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
900 tb->page_next[n] = p->first_tb;
901 last_first_tb = p->first_tb;
902 p->first_tb = (TranslationBlock *)((long)tb | n);
903 invalidate_page_bitmap(p);
904
905 #if defined(TARGET_HAS_SMC) || 1
906
907 #if defined(CONFIG_USER_ONLY)
908 if (p->flags & PAGE_WRITE) {
909 target_ulong addr;
910 PageDesc *p2;
911 int prot;
912
913         /* force the host page to be non-writable (writes will take a
914            page fault + mprotect overhead) */
915 page_addr &= qemu_host_page_mask;
916 prot = 0;
917 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
918 addr += TARGET_PAGE_SIZE) {
919
920 p2 = page_find (addr >> TARGET_PAGE_BITS);
921 if (!p2)
922 continue;
923 prot |= p2->flags;
924 p2->flags &= ~PAGE_WRITE;
925 page_get_flags(addr);
926 }
927 mprotect(g2h(page_addr), qemu_host_page_size,
928 (prot & PAGE_BITS) & ~PAGE_WRITE);
929 #ifdef DEBUG_TB_INVALIDATE
930 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
931 page_addr);
932 #endif
933 }
934 #else
935 /* if some code is already present, then the pages are already
936 protected. So we handle the case where only the first TB is
937 allocated in a physical page */
938 if (!last_first_tb) {
939 tlb_protect_code(page_addr);
940 }
941 #endif
942
943 #endif /* TARGET_HAS_SMC */
944 }
945
946 /* Allocate a new translation block. Returns NULL if there are too many
947    translation blocks or too much generated code; the caller must then flush. */
948 TranslationBlock *tb_alloc(target_ulong pc)
949 {
950 TranslationBlock *tb;
951
952 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
953 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
954 return NULL;
955 tb = &tbs[nb_tbs++];
956 tb->pc = pc;
957 tb->cflags = 0;
958 return tb;
959 }
960
961 /* add a new TB and link it to the physical page tables. phys_page2 is
962 (-1) to indicate that only one page contains the TB. */
963 void tb_link_phys(TranslationBlock *tb,
964 target_ulong phys_pc, target_ulong phys_page2)
965 {
966 unsigned int h;
967 TranslationBlock **ptb;
968
969 /* add in the physical hash table */
970 h = tb_phys_hash_func(phys_pc);
971 ptb = &tb_phys_hash[h];
972 tb->phys_hash_next = *ptb;
973 *ptb = tb;
974
975 /* add in the page list */
976 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
977 if (phys_page2 != -1)
978 tb_alloc_page(tb, 1, phys_page2);
979 else
980 tb->page_addr[1] = -1;
981
982 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
983 tb->jmp_next[0] = NULL;
984 tb->jmp_next[1] = NULL;
985
986 /* init original jump addresses */
987 if (tb->tb_next_offset[0] != 0xffff)
988 tb_reset_jump(tb, 0);
989 if (tb->tb_next_offset[1] != 0xffff)
990 tb_reset_jump(tb, 1);
991
992 #ifdef DEBUG_TB_CHECK
993 tb_page_check();
994 #endif
995 }
996
997 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
998 tb[1].tc_ptr. Return NULL if not found */
999 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1000 {
1001 int m_min, m_max, m;
1002 unsigned long v;
1003 TranslationBlock *tb;
1004
1005 if (nb_tbs <= 0)
1006 return NULL;
1007 if (tc_ptr < (unsigned long)code_gen_buffer ||
1008 tc_ptr >= (unsigned long)code_gen_ptr)
1009 return NULL;
1010 /* binary search (cf Knuth) */
1011 m_min = 0;
1012 m_max = nb_tbs - 1;
1013 while (m_min <= m_max) {
1014 m = (m_min + m_max) >> 1;
1015 tb = &tbs[m];
1016 v = (unsigned long)tb->tc_ptr;
1017 if (v == tc_ptr)
1018 return tb;
1019 else if (tc_ptr < v) {
1020 m_max = m - 1;
1021 } else {
1022 m_min = m + 1;
1023 }
1024 }
1025 return &tbs[m_max];
1026 }
1027
1028 static void tb_reset_jump_recursive(TranslationBlock *tb);
1029
1030 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1031 {
1032 TranslationBlock *tb1, *tb_next, **ptb;
1033 unsigned int n1;
1034
1035 tb1 = tb->jmp_next[n];
1036 if (tb1 != NULL) {
1037 /* find head of list */
1038 for(;;) {
1039 n1 = (long)tb1 & 3;
1040 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1041 if (n1 == 2)
1042 break;
1043 tb1 = tb1->jmp_next[n1];
1044 }
1045         /* we are now sure that tb jumps to tb1 */
1046 tb_next = tb1;
1047
1048 /* remove tb from the jmp_first list */
1049 ptb = &tb_next->jmp_first;
1050 for(;;) {
1051 tb1 = *ptb;
1052 n1 = (long)tb1 & 3;
1053 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1054 if (n1 == n && tb1 == tb)
1055 break;
1056 ptb = &tb1->jmp_next[n1];
1057 }
1058 *ptb = tb->jmp_next[n];
1059 tb->jmp_next[n] = NULL;
1060
1061 /* suppress the jump to next tb in generated code */
1062 tb_reset_jump(tb, n);
1063
1064 /* suppress jumps in the tb on which we could have jumped */
1065 tb_reset_jump_recursive(tb_next);
1066 }
1067 }
1068
1069 static void tb_reset_jump_recursive(TranslationBlock *tb)
1070 {
1071 tb_reset_jump_recursive2(tb, 0);
1072 tb_reset_jump_recursive2(tb, 1);
1073 }
1074
1075 #if defined(TARGET_HAS_ICE)
1076 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1077 {
1078 target_phys_addr_t addr;
1079 target_ulong pd;
1080 ram_addr_t ram_addr;
1081 PhysPageDesc *p;
1082
1083 addr = cpu_get_phys_page_debug(env, pc);
1084 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1085 if (!p) {
1086 pd = IO_MEM_UNASSIGNED;
1087 } else {
1088 pd = p->phys_offset;
1089 }
1090 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1091 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1092 }
1093 #endif
1094
1095 /* Add a watchpoint. */
1096 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1097 {
1098 int i;
1099
1100 for (i = 0; i < env->nb_watchpoints; i++) {
1101 if (addr == env->watchpoint[i].vaddr)
1102 return 0;
1103 }
1104 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1105 return -1;
1106
1107 i = env->nb_watchpoints++;
1108 env->watchpoint[i].vaddr = addr;
1109 tlb_flush_page(env, addr);
1110 /* FIXME: This flush is needed because of the hack to make memory ops
1111 terminate the TB. It can be removed once the proper IO trap and
1112 re-execute bits are in. */
1113 tb_flush(env);
1114 return i;
1115 }
1116
1117 /* Remove a watchpoint. */
1118 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1119 {
1120 int i;
1121
1122 for (i = 0; i < env->nb_watchpoints; i++) {
1123 if (addr == env->watchpoint[i].vaddr) {
1124 env->nb_watchpoints--;
1125 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1126 tlb_flush_page(env, addr);
1127 return 0;
1128 }
1129 }
1130 return -1;
1131 }
1132
1133 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1134 breakpoint is reached */
1135 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1136 {
1137 #if defined(TARGET_HAS_ICE)
1138 int i;
1139
1140 for(i = 0; i < env->nb_breakpoints; i++) {
1141 if (env->breakpoints[i] == pc)
1142 return 0;
1143 }
1144
1145 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1146 return -1;
1147 env->breakpoints[env->nb_breakpoints++] = pc;
1148
1149 breakpoint_invalidate(env, pc);
1150 return 0;
1151 #else
1152 return -1;
1153 #endif
1154 }
1155
1156 /* remove a breakpoint */
1157 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1158 {
1159 #if defined(TARGET_HAS_ICE)
1160 int i;
1161 for(i = 0; i < env->nb_breakpoints; i++) {
1162 if (env->breakpoints[i] == pc)
1163 goto found;
1164 }
1165 return -1;
1166 found:
1167 env->nb_breakpoints--;
1168 if (i < env->nb_breakpoints)
1169 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1170
1171 breakpoint_invalidate(env, pc);
1172 return 0;
1173 #else
1174 return -1;
1175 #endif
1176 }
1177
1178 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1179 CPU loop after each instruction */
1180 void cpu_single_step(CPUState *env, int enabled)
1181 {
1182 #if defined(TARGET_HAS_ICE)
1183 if (env->singlestep_enabled != enabled) {
1184 env->singlestep_enabled = enabled;
1185         /* must flush all the translated code to avoid inconsistencies */
1186 /* XXX: only flush what is necessary */
1187 tb_flush(env);
1188 }
1189 #endif
1190 }
1191
1192 /* enable or disable low-level logging */
1193 void cpu_set_log(int log_flags)
1194 {
1195 loglevel = log_flags;
1196 if (loglevel && !logfile) {
1197 logfile = fopen(logfilename, log_append ? "a" : "w");
1198 if (!logfile) {
1199 perror(logfilename);
1200 _exit(1);
1201 }
1202 #if !defined(CONFIG_SOFTMMU)
1203 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1204 {
1205 static uint8_t logfile_buf[4096];
1206 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1207 }
1208 #else
1209 setvbuf(logfile, NULL, _IOLBF, 0);
1210 #endif
1211 log_append = 1;
1212 }
1213 if (!loglevel && logfile) {
1214 fclose(logfile);
1215 logfile = NULL;
1216 }
1217 }
1218
1219 void cpu_set_log_filename(const char *filename)
1220 {
1221 logfilename = strdup(filename);
1222 if (logfile) {
1223 fclose(logfile);
1224 logfile = NULL;
1225 }
1226 cpu_set_log(loglevel);
1227 }
1228
1229 /* mask must never be zero, except for A20 change call */
1230 void cpu_interrupt(CPUState *env, int mask)
1231 {
1232 TranslationBlock *tb;
1233 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1234
1235 env->interrupt_request |= mask;
1236 /* if the cpu is currently executing code, we must unlink it and
1237 all the potentially executing TB */
1238 tb = env->current_tb;
1239 if (tb && !testandset(&interrupt_lock)) {
1240 env->current_tb = NULL;
1241 tb_reset_jump_recursive(tb);
1242 resetlock(&interrupt_lock);
1243 }
1244 }
1245
1246 void cpu_reset_interrupt(CPUState *env, int mask)
1247 {
1248 env->interrupt_request &= ~mask;
1249 }
1250
1251 CPULogItem cpu_log_items[] = {
1252 { CPU_LOG_TB_OUT_ASM, "out_asm",
1253 "show generated host assembly code for each compiled TB" },
1254 { CPU_LOG_TB_IN_ASM, "in_asm",
1255 "show target assembly code for each compiled TB" },
1256 { CPU_LOG_TB_OP, "op",
1257 "show micro ops for each compiled TB" },
1258 { CPU_LOG_TB_OP_OPT, "op_opt",
1259 "show micro ops "
1260 #ifdef TARGET_I386
1261 "before eflags optimization and "
1262 #endif
1263 "after liveness analysis" },
1264 { CPU_LOG_INT, "int",
1265 "show interrupts/exceptions in short format" },
1266 { CPU_LOG_EXEC, "exec",
1267 "show trace before each executed TB (lots of logs)" },
1268 { CPU_LOG_TB_CPU, "cpu",
1269 "show CPU state before block translation" },
1270 #ifdef TARGET_I386
1271 { CPU_LOG_PCALL, "pcall",
1272 "show protected mode far calls/returns/exceptions" },
1273 #endif
1274 #ifdef DEBUG_IOPORT
1275 { CPU_LOG_IOPORT, "ioport",
1276 "show all i/o ports accesses" },
1277 #endif
1278 { 0, NULL, NULL },
1279 };
1280
1281 static int cmp1(const char *s1, int n, const char *s2)
1282 {
1283 if (strlen(s2) != n)
1284 return 0;
1285 return memcmp(s1, s2, n) == 0;
1286 }
1287
1288 /* takes a comma-separated list of log masks. Returns 0 on error. */
1289 int cpu_str_to_log_mask(const char *str)
1290 {
1291 CPULogItem *item;
1292 int mask;
1293 const char *p, *p1;
1294
1295 p = str;
1296 mask = 0;
1297 for(;;) {
1298 p1 = strchr(p, ',');
1299 if (!p1)
1300 p1 = p + strlen(p);
1301 if(cmp1(p,p1-p,"all")) {
1302 for(item = cpu_log_items; item->mask != 0; item++) {
1303 mask |= item->mask;
1304 }
1305 } else {
1306 for(item = cpu_log_items; item->mask != 0; item++) {
1307 if (cmp1(p, p1 - p, item->name))
1308 goto found;
1309 }
1310 return 0;
1311 }
1312 found:
1313 mask |= item->mask;
1314 if (*p1 != ',')
1315 break;
1316 p = p1 + 1;
1317 }
1318 return mask;
1319 }
1320
1321 void cpu_abort(CPUState *env, const char *fmt, ...)
1322 {
1323 va_list ap;
1324 va_list ap2;
1325
1326 va_start(ap, fmt);
1327 va_copy(ap2, ap);
1328 fprintf(stderr, "qemu: fatal: ");
1329 vfprintf(stderr, fmt, ap);
1330 fprintf(stderr, "\n");
1331 #ifdef TARGET_I386
1332 if(env->intercept & INTERCEPT_SVM_MASK) {
1333 /* most probably the virtual machine should not
1334 be shut down but rather caught by the VMM */
1335 vmexit(SVM_EXIT_SHUTDOWN, 0);
1336 }
1337 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1338 #else
1339 cpu_dump_state(env, stderr, fprintf, 0);
1340 #endif
1341 if (logfile) {
1342 fprintf(logfile, "qemu: fatal: ");
1343 vfprintf(logfile, fmt, ap2);
1344 fprintf(logfile, "\n");
1345 #ifdef TARGET_I386
1346 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1347 #else
1348 cpu_dump_state(env, logfile, fprintf, 0);
1349 #endif
1350 fflush(logfile);
1351 fclose(logfile);
1352 }
1353 va_end(ap2);
1354 va_end(ap);
1355 abort();
1356 }
1357
1358 CPUState *cpu_copy(CPUState *env)
1359 {
1360 CPUState *new_env = cpu_init(env->cpu_model_str);
1361 /* preserve chaining and index */
1362 CPUState *next_cpu = new_env->next_cpu;
1363 int cpu_index = new_env->cpu_index;
1364 memcpy(new_env, env, sizeof(CPUState));
1365 new_env->next_cpu = next_cpu;
1366 new_env->cpu_index = cpu_index;
1367 return new_env;
1368 }
1369
1370 #if !defined(CONFIG_USER_ONLY)
1371
1372 /* NOTE: if flush_global is true, also flush global entries (not
1373 implemented yet) */
1374 void tlb_flush(CPUState *env, int flush_global)
1375 {
1376 int i;
1377
1378 #if defined(DEBUG_TLB)
1379 printf("tlb_flush:\n");
1380 #endif
1381 /* must reset current TB so that interrupts cannot modify the
1382 links while we are modifying them */
1383 env->current_tb = NULL;
1384
1385 for(i = 0; i < CPU_TLB_SIZE; i++) {
1386 env->tlb_table[0][i].addr_read = -1;
1387 env->tlb_table[0][i].addr_write = -1;
1388 env->tlb_table[0][i].addr_code = -1;
1389 env->tlb_table[1][i].addr_read = -1;
1390 env->tlb_table[1][i].addr_write = -1;
1391 env->tlb_table[1][i].addr_code = -1;
1392 #if (NB_MMU_MODES >= 3)
1393 env->tlb_table[2][i].addr_read = -1;
1394 env->tlb_table[2][i].addr_write = -1;
1395 env->tlb_table[2][i].addr_code = -1;
1396 #if (NB_MMU_MODES == 4)
1397 env->tlb_table[3][i].addr_read = -1;
1398 env->tlb_table[3][i].addr_write = -1;
1399 env->tlb_table[3][i].addr_code = -1;
1400 #endif
1401 #endif
1402 }
1403
1404 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1405
1406 #if !defined(CONFIG_SOFTMMU)
1407 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1408 #endif
1409 #ifdef USE_KQEMU
1410 if (env->kqemu_enabled) {
1411 kqemu_flush(env, flush_global);
1412 }
1413 #endif
1414 tlb_flush_count++;
1415 }
1416
1417 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1418 {
1419 if (addr == (tlb_entry->addr_read &
1420 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1421 addr == (tlb_entry->addr_write &
1422 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1423 addr == (tlb_entry->addr_code &
1424 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1425 tlb_entry->addr_read = -1;
1426 tlb_entry->addr_write = -1;
1427 tlb_entry->addr_code = -1;
1428 }
1429 }
1430
1431 void tlb_flush_page(CPUState *env, target_ulong addr)
1432 {
1433 int i;
1434 TranslationBlock *tb;
1435
1436 #if defined(DEBUG_TLB)
1437 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1438 #endif
1439 /* must reset current TB so that interrupts cannot modify the
1440 links while we are modifying them */
1441 env->current_tb = NULL;
1442
1443 addr &= TARGET_PAGE_MASK;
1444 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1445 tlb_flush_entry(&env->tlb_table[0][i], addr);
1446 tlb_flush_entry(&env->tlb_table[1][i], addr);
1447 #if (NB_MMU_MODES >= 3)
1448 tlb_flush_entry(&env->tlb_table[2][i], addr);
1449 #if (NB_MMU_MODES == 4)
1450 tlb_flush_entry(&env->tlb_table[3][i], addr);
1451 #endif
1452 #endif
1453
1454 /* Discard jump cache entries for any tb which might potentially
1455 overlap the flushed page. */
1456 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1457 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1458
1459 i = tb_jmp_cache_hash_page(addr);
1460 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1461
1462 #if !defined(CONFIG_SOFTMMU)
1463 if (addr < MMAP_AREA_END)
1464 munmap((void *)addr, TARGET_PAGE_SIZE);
1465 #endif
1466 #ifdef USE_KQEMU
1467 if (env->kqemu_enabled) {
1468 kqemu_flush_page(env, addr);
1469 }
1470 #endif
1471 }
1472
1473 /* update the TLBs so that writes to code in the virtual page 'addr'
1474 can be detected */
1475 static void tlb_protect_code(ram_addr_t ram_addr)
1476 {
1477 cpu_physical_memory_reset_dirty(ram_addr,
1478 ram_addr + TARGET_PAGE_SIZE,
1479 CODE_DIRTY_FLAG);
1480 }
1481
1482 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1483 tested for self modifying code */
1484 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1485 target_ulong vaddr)
1486 {
1487 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1488 }
1489
1490 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1491 unsigned long start, unsigned long length)
1492 {
1493 unsigned long addr;
1494 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1495 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1496 if ((addr - start) < length) {
1497 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1498 }
1499 }
1500 }
1501
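/* Clear the given dirty flag bits for the RAM range [start, end[ in
   phys_ram_dirty (one flag byte per target page) and downgrade any TLB write
   entry mapping into that range to IO_MEM_NOTDIRTY, so that the next guest
   write takes the slow path and marks the page dirty again. */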
1502 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1503 int dirty_flags)
1504 {
1505 CPUState *env;
1506 unsigned long length, start1;
1507 int i, mask, len;
1508 uint8_t *p;
1509
1510 start &= TARGET_PAGE_MASK;
1511 end = TARGET_PAGE_ALIGN(end);
1512
1513 length = end - start;
1514 if (length == 0)
1515 return;
1516 len = length >> TARGET_PAGE_BITS;
1517 #ifdef USE_KQEMU
1518 /* XXX: should not depend on cpu context */
1519 env = first_cpu;
1520 if (env->kqemu_enabled) {
1521 ram_addr_t addr;
1522 addr = start;
1523 for(i = 0; i < len; i++) {
1524 kqemu_set_notdirty(env, addr);
1525 addr += TARGET_PAGE_SIZE;
1526 }
1527 }
1528 #endif
1529 mask = ~dirty_flags;
1530 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1531 for(i = 0; i < len; i++)
1532 p[i] &= mask;
1533
1534 /* we modify the TLB cache so that the dirty bit will be set again
1535 when accessing the range */
1536 start1 = start + (unsigned long)phys_ram_base;
1537 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1538 for(i = 0; i < CPU_TLB_SIZE; i++)
1539 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1540 for(i = 0; i < CPU_TLB_SIZE; i++)
1541 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1542 #if (NB_MMU_MODES >= 3)
1543 for(i = 0; i < CPU_TLB_SIZE; i++)
1544 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1545 #if (NB_MMU_MODES == 4)
1546 for(i = 0; i < CPU_TLB_SIZE; i++)
1547 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1548 #endif
1549 #endif
1550 }
1551
1552 #if !defined(CONFIG_SOFTMMU)
1553 /* XXX: this is expensive */
1554 {
1555 VirtPageDesc *p;
1556 int j;
1557 target_ulong addr;
1558
1559 for(i = 0; i < L1_SIZE; i++) {
1560 p = l1_virt_map[i];
1561 if (p) {
1562 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1563 for(j = 0; j < L2_SIZE; j++) {
1564 if (p->valid_tag == virt_valid_tag &&
1565 p->phys_addr >= start && p->phys_addr < end &&
1566 (p->prot & PROT_WRITE)) {
1567 if (addr < MMAP_AREA_END) {
1568 mprotect((void *)addr, TARGET_PAGE_SIZE,
1569 p->prot & ~PROT_WRITE);
1570 }
1571 }
1572 addr += TARGET_PAGE_SIZE;
1573 p++;
1574 }
1575 }
1576 }
1577 }
1578 #endif
1579 }
1580
1581 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1582 {
1583 ram_addr_t ram_addr;
1584
1585 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1586 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1587 tlb_entry->addend - (unsigned long)phys_ram_base;
1588 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1589 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1590 }
1591 }
1592 }
1593
1594 /* update the TLB according to the current state of the dirty bits */
1595 void cpu_tlb_update_dirty(CPUState *env)
1596 {
1597 int i;
1598 for(i = 0; i < CPU_TLB_SIZE; i++)
1599 tlb_update_dirty(&env->tlb_table[0][i]);
1600 for(i = 0; i < CPU_TLB_SIZE; i++)
1601 tlb_update_dirty(&env->tlb_table[1][i]);
1602 #if (NB_MMU_MODES >= 3)
1603 for(i = 0; i < CPU_TLB_SIZE; i++)
1604 tlb_update_dirty(&env->tlb_table[2][i]);
1605 #if (NB_MMU_MODES == 4)
1606 for(i = 0; i < CPU_TLB_SIZE; i++)
1607 tlb_update_dirty(&env->tlb_table[3][i]);
1608 #endif
1609 #endif
1610 }
1611
1612 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1613 unsigned long start)
1614 {
1615 unsigned long addr;
1616 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1617 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1618 if (addr == start) {
1619 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1620 }
1621 }
1622 }
1623
1624 /* update the TLB corresponding to virtual page vaddr and phys addr
1625 addr so that it is no longer dirty */
1626 static inline void tlb_set_dirty(CPUState *env,
1627 unsigned long addr, target_ulong vaddr)
1628 {
1629 int i;
1630
1631 addr &= TARGET_PAGE_MASK;
1632 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1633 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1634 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1635 #if (NB_MMU_MODES >= 3)
1636 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1637 #if (NB_MMU_MODES == 4)
1638 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1639 #endif
1640 #endif
1641 }
1642
1643 /* add a new TLB entry. At most one entry for a given virtual address
1644 is permitted. Return 0 if OK or 2 if the page could not be mapped
1645 (can only happen in non SOFTMMU mode for I/O pages or pages
1646 conflicting with the host address space). */
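/* Each CPUTLBEntry stores the target virtual page address in
   addr_read/addr_write/addr_code; non-zero low bits carry an io_mem index (or
   a flag such as IO_MEM_NOTDIRTY) and force the access onto the slow I/O
   path. For a plain RAM page the host address is simply vaddr + addend. */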
1647 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1648 target_phys_addr_t paddr, int prot,
1649 int mmu_idx, int is_softmmu)
1650 {
1651 PhysPageDesc *p;
1652 unsigned long pd;
1653 unsigned int index;
1654 target_ulong address;
1655 target_phys_addr_t addend;
1656 int ret;
1657 CPUTLBEntry *te;
1658 int i;
1659
1660 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1661 if (!p) {
1662 pd = IO_MEM_UNASSIGNED;
1663 } else {
1664 pd = p->phys_offset;
1665 }
1666 #if defined(DEBUG_TLB)
1667 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1668 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1669 #endif
1670
1671 ret = 0;
1672 #if !defined(CONFIG_SOFTMMU)
1673 if (is_softmmu)
1674 #endif
1675 {
1676 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1677 /* IO memory case */
1678 address = vaddr | pd;
1679 addend = paddr;
1680 } else {
1681 /* standard memory */
1682 address = vaddr;
1683 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1684 }
1685
1686 /* Make accesses to pages with watchpoints go via the
1687 watchpoint trap routines. */
1688 for (i = 0; i < env->nb_watchpoints; i++) {
1689 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1690 if (address & ~TARGET_PAGE_MASK) {
1691 env->watchpoint[i].addend = 0;
1692 address = vaddr | io_mem_watch;
1693 } else {
1694 env->watchpoint[i].addend = pd - paddr +
1695 (unsigned long) phys_ram_base;
1696 /* TODO: Figure out how to make read watchpoints coexist
1697 with code. */
1698 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1699 }
1700 }
1701 }
1702
1703 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1704 addend -= vaddr;
1705 te = &env->tlb_table[mmu_idx][index];
1706 te->addend = addend;
1707 if (prot & PAGE_READ) {
1708 te->addr_read = address;
1709 } else {
1710 te->addr_read = -1;
1711 }
1712 if (prot & PAGE_EXEC) {
1713 te->addr_code = address;
1714 } else {
1715 te->addr_code = -1;
1716 }
1717 if (prot & PAGE_WRITE) {
1718 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1719 (pd & IO_MEM_ROMD)) {
1720 /* write access calls the I/O callback */
1721 te->addr_write = vaddr |
1722 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1723 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1724 !cpu_physical_memory_is_dirty(pd)) {
1725 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1726 } else {
1727 te->addr_write = address;
1728 }
1729 } else {
1730 te->addr_write = -1;
1731 }
1732 }
1733 #if !defined(CONFIG_SOFTMMU)
1734 else {
1735 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1736 /* IO access: no mapping is done as it will be handled by the
1737 soft MMU */
1738 if (!(env->hflags & HF_SOFTMMU_MASK))
1739 ret = 2;
1740 } else {
1741 void *map_addr;
1742
1743 if (vaddr >= MMAP_AREA_END) {
1744 ret = 2;
1745 } else {
1746 if (prot & PROT_WRITE) {
1747 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1748 #if defined(TARGET_HAS_SMC) || 1
1749 first_tb ||
1750 #endif
1751 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1752 !cpu_physical_memory_is_dirty(pd))) {
1753                 /* ROM: we behave as if code were inside */
1754 /* if code is present, we only map as read only and save the
1755 original mapping */
1756 VirtPageDesc *vp;
1757
1758 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1759 vp->phys_addr = pd;
1760 vp->prot = prot;
1761 vp->valid_tag = virt_valid_tag;
1762 prot &= ~PAGE_WRITE;
1763 }
1764 }
1765 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1766 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1767 if (map_addr == MAP_FAILED) {
1768                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1769 paddr, vaddr);
1770 }
1771 }
1772 }
1773 }
1774 #endif
1775 return ret;
1776 }
1777
1778 /* called from signal handler: invalidate the code and unprotect the
1779    page. Return TRUE if the fault was successfully handled. */
1780 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1781 {
1782 #if !defined(CONFIG_SOFTMMU)
1783 VirtPageDesc *vp;
1784
1785 #if defined(DEBUG_TLB)
1786 printf("page_unprotect: addr=0x%08x\n", addr);
1787 #endif
1788 addr &= TARGET_PAGE_MASK;
1789
1790 /* if it is not mapped, no need to worry here */
1791 if (addr >= MMAP_AREA_END)
1792 return 0;
1793 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1794 if (!vp)
1795 return 0;
1796 /* NOTE: in this case, validate_tag is _not_ tested as it
1797 validates only the code TLB */
1798 if (vp->valid_tag != virt_valid_tag)
1799 return 0;
1800 if (!(vp->prot & PAGE_WRITE))
1801 return 0;
1802 #if defined(DEBUG_TLB)
1803 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1804 addr, vp->phys_addr, vp->prot);
1805 #endif
1806 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1807 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1808 (unsigned long)addr, vp->prot);
1809 /* set the dirty bit */
1810 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1811 /* flush the code inside */
1812 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1813 return 1;
1814 #else
1815 return 0;
1816 #endif
1817 }
1818
1819 #else
1820
1821 void tlb_flush(CPUState *env, int flush_global)
1822 {
1823 }
1824
1825 void tlb_flush_page(CPUState *env, target_ulong addr)
1826 {
1827 }
1828
1829 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1830 target_phys_addr_t paddr, int prot,
1831 int mmu_idx, int is_softmmu)
1832 {
1833 return 0;
1834 }
1835
1836 /* dump memory mappings */
1837 void page_dump(FILE *f)
1838 {
1839 unsigned long start, end;
1840 int i, j, prot, prot1;
1841 PageDesc *p;
1842
1843 fprintf(f, "%-8s %-8s %-8s %s\n",
1844 "start", "end", "size", "prot");
1845 start = -1;
1846 end = -1;
1847 prot = 0;
1848 for(i = 0; i <= L1_SIZE; i++) {
1849 if (i < L1_SIZE)
1850 p = l1_map[i];
1851 else
1852 p = NULL;
1853 for(j = 0;j < L2_SIZE; j++) {
1854 if (!p)
1855 prot1 = 0;
1856 else
1857 prot1 = p[j].flags;
1858 if (prot1 != prot) {
1859 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1860 if (start != -1) {
1861 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1862 start, end, end - start,
1863 prot & PAGE_READ ? 'r' : '-',
1864 prot & PAGE_WRITE ? 'w' : '-',
1865 prot & PAGE_EXEC ? 'x' : '-');
1866 }
1867 if (prot1 != 0)
1868 start = end;
1869 else
1870 start = -1;
1871 prot = prot1;
1872 }
1873 if (!p)
1874 break;
1875 }
1876 }
1877 }
1878
1879 int page_get_flags(target_ulong address)
1880 {
1881 PageDesc *p;
1882
1883 p = page_find(address >> TARGET_PAGE_BITS);
1884 if (!p)
1885 return 0;
1886 return p->flags;
1887 }
1888
1889 /* modify the flags of a page and invalidate the code if
1890    necessary. The flag PAGE_WRITE_ORG is set automatically
1891    depending on PAGE_WRITE */
1892 void page_set_flags(target_ulong start, target_ulong end, int flags)
1893 {
1894 PageDesc *p;
1895 target_ulong addr;
1896
1897 start = start & TARGET_PAGE_MASK;
1898 end = TARGET_PAGE_ALIGN(end);
1899 if (flags & PAGE_WRITE)
1900 flags |= PAGE_WRITE_ORG;
1901 spin_lock(&tb_lock);
1902 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1903 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1904 /* if the write protection is set, then we invalidate the code
1905 inside */
1906 if (!(p->flags & PAGE_WRITE) &&
1907 (flags & PAGE_WRITE) &&
1908 p->first_tb) {
1909 tb_invalidate_phys_page(addr, 0, NULL);
1910 }
1911 p->flags = flags;
1912 }
1913 spin_unlock(&tb_lock);
1914 }
1915
1916 int page_check_range(target_ulong start, target_ulong len, int flags)
1917 {
1918 PageDesc *p;
1919 target_ulong end;
1920 target_ulong addr;
1921
1922     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1923 start = start & TARGET_PAGE_MASK;
1924
1925 if( end < start )
1926 /* we've wrapped around */
1927 return -1;
1928 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1929 p = page_find(addr >> TARGET_PAGE_BITS);
1930 if( !p )
1931 return -1;
1932 if( !(p->flags & PAGE_VALID) )
1933 return -1;
1934
1935 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1936 return -1;
1937 if (flags & PAGE_WRITE) {
1938 if (!(p->flags & PAGE_WRITE_ORG))
1939 return -1;
1940 /* unprotect the page if it was put read-only because it
1941 contains translated code */
1942 if (!(p->flags & PAGE_WRITE)) {
1943 if (!page_unprotect(addr, 0, NULL))
1944 return -1;
1945 }
1946 return 0;
1947 }
1948 }
1949 return 0;
1950 }
1951
1952 /* called from signal handler: invalidate the code and unprotect the
1953    page. Return TRUE if the fault was successfully handled. */
1954 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1955 {
1956 unsigned int page_index, prot, pindex;
1957 PageDesc *p, *p1;
1958 target_ulong host_start, host_end, addr;
1959
1960 host_start = address & qemu_host_page_mask;
1961 page_index = host_start >> TARGET_PAGE_BITS;
1962 p1 = page_find(page_index);
1963 if (!p1)
1964 return 0;
1965 host_end = host_start + qemu_host_page_size;
1966 p = p1;
1967 prot = 0;
1968 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1969 prot |= p->flags;
1970 p++;
1971 }
1972 /* if the page was really writable, then we change its
1973 protection back to writable */
1974 if (prot & PAGE_WRITE_ORG) {
1975 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1976 if (!(p1[pindex].flags & PAGE_WRITE)) {
1977 mprotect((void *)g2h(host_start), qemu_host_page_size,
1978 (prot & PAGE_BITS) | PAGE_WRITE);
1979 p1[pindex].flags |= PAGE_WRITE;
1980 /* and since the content will be modified, we must invalidate
1981 the corresponding translated code. */
1982 tb_invalidate_phys_page(address, pc, puc);
1983 #ifdef DEBUG_TB_CHECK
1984 tb_invalidate_check(address);
1985 #endif
1986 return 1;
1987 }
1988 }
1989 return 0;
1990 }
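/* Illustrative sketch: in user-mode emulation the host SIGSEGV handler
   forwards write faults on write-protected guest pages to page_unprotect();
   the helper name and the already-converted guest address are assumptions
   made for this example, which is kept under #if 0. */
#if 0
static int example_handle_write_fault(target_ulong guest_addr,
                                      unsigned long host_pc, void *puc)
{
    /* returns 1 if the page held translated code and was unprotected,
       in which case the faulting store can simply be restarted */
    return page_unprotect(guest_addr, host_pc, puc);
}
#endif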
1991
1992 static inline void tlb_set_dirty(CPUState *env,
1993 unsigned long addr, target_ulong vaddr)
1994 {
1995 }
1996 #endif /* defined(CONFIG_USER_ONLY) */
1997
1998 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1999 int memory);
2000 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2001 int orig_memory);
2002 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2003 need_subpage) \
2004 do { \
2005 if (addr > start_addr) \
2006 start_addr2 = 0; \
2007 else { \
2008 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2009 if (start_addr2 > 0) \
2010 need_subpage = 1; \
2011 } \
2012 \
2013 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2014 end_addr2 = TARGET_PAGE_SIZE - 1; \
2015 else { \
2016 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2017 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2018 need_subpage = 1; \
2019 } \
2020 } while (0)
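/* Worked example of the macro above (assuming TARGET_PAGE_SIZE is 0x1000):
   registering a region with start_addr = 0x10c00 and orig_size = 0x200
   visits the single page containing 0x10c00.  Since addr == start_addr,
   start_addr2 becomes 0x10c00 & ~TARGET_PAGE_MASK = 0xc00, and since the
   region ends before the page does, end_addr2 becomes
   (0x10c00 + 0x200 - 1) & ~TARGET_PAGE_MASK = 0xdff.  Both checks set
   need_subpage, so only offsets 0xc00..0xdff of that page are routed to the
   new handler.  Note that the macro also reads orig_size from the caller's
   scope even though it is not a parameter. */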
2021
2022 /* register physical memory. 'size' must be a multiple of the target
2023 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2024 I/O memory page */
2025 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2026 ram_addr_t size,
2027 ram_addr_t phys_offset)
2028 {
2029 target_phys_addr_t addr, end_addr;
2030 PhysPageDesc *p;
2031 CPUState *env;
2032 ram_addr_t orig_size = size;
2033 void *subpage;
2034
2035 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2036 end_addr = start_addr + (target_phys_addr_t)size;
2037 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2038 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2039 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2040 ram_addr_t orig_memory = p->phys_offset;
2041 target_phys_addr_t start_addr2, end_addr2;
2042 int need_subpage = 0;
2043
2044 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2045 need_subpage);
2046 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2047 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2048 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2049 &p->phys_offset, orig_memory);
2050 } else {
2051 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2052 >> IO_MEM_SHIFT];
2053 }
2054 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2055 } else {
2056 p->phys_offset = phys_offset;
2057 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2058 (phys_offset & IO_MEM_ROMD))
2059 phys_offset += TARGET_PAGE_SIZE;
2060 }
2061 } else {
2062 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2063 p->phys_offset = phys_offset;
2064 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2065 (phys_offset & IO_MEM_ROMD))
2066 phys_offset += TARGET_PAGE_SIZE;
2067 else {
2068 target_phys_addr_t start_addr2, end_addr2;
2069 int need_subpage = 0;
2070
2071 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2072 end_addr2, need_subpage);
2073
2074 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2075 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2076 &p->phys_offset, IO_MEM_UNASSIGNED);
2077 subpage_register(subpage, start_addr2, end_addr2,
2078 phys_offset);
2079 }
2080 }
2081 }
2082 }
2083
2084 /* since each CPU stores ram addresses in its TLB cache, we must
2085 reset the modified entries */
2086 /* XXX: slow! */
2087 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2088 tlb_flush(env, 1);
2089 }
2090 }
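/* Illustrative sketch of the usual call sequence from a board model: back a
   region with guest RAM and make it guest-visible.  The 4 MB size and the
   base address are made up for the example, which is kept under #if 0. */
#if 0
static void example_map_ram(void)
{
    ram_addr_t ram_offset;

    /* reserve 4 MB out of the preallocated phys_ram_base pool */
    ram_offset = qemu_ram_alloc(4 * 1024 * 1024);

    /* map it at guest physical address 0; IO_MEM_RAM in the low bits of
       phys_offset marks the pages as ordinary RAM */
    cpu_register_physical_memory(0x00000000, 4 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif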
2091
2092 /* XXX: temporary until new memory mapping API */
2093 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2094 {
2095 PhysPageDesc *p;
2096
2097 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2098 if (!p)
2099 return IO_MEM_UNASSIGNED;
2100 return p->phys_offset;
2101 }
2102
2103 /* XXX: better than nothing */
2104 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2105 {
2106 ram_addr_t addr;
2107 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2108 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2109 (uint64_t)size, (uint64_t)phys_ram_size);
2110 abort();
2111 }
2112 addr = phys_ram_alloc_offset;
2113 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2114 return addr;
2115 }
2116
2117 void qemu_ram_free(ram_addr_t addr)
2118 {
2119 }
2120
2121 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2122 {
2123 #ifdef DEBUG_UNASSIGNED
2124 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2125 #endif
2126 #ifdef TARGET_SPARC
2127 do_unassigned_access(addr, 0, 0, 0);
2128 #elif defined(TARGET_CRIS)
2129 do_unassigned_access(addr, 0, 0, 0);
2130 #endif
2131 return 0;
2132 }
2133
2134 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2135 {
2136 #ifdef DEBUG_UNASSIGNED
2137 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2138 #endif
2139 #ifdef TARGET_SPARC
2140 do_unassigned_access(addr, 1, 0, 0);
2141 #elif defined(TARGET_CRIS)
2142 do_unassigned_access(addr, 1, 0, 0);
2143 #endif
2144 }
2145
2146 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2147 unassigned_mem_readb,
2148 unassigned_mem_readb,
2149 unassigned_mem_readb,
2150 };
2151
2152 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2153 unassigned_mem_writeb,
2154 unassigned_mem_writeb,
2155 unassigned_mem_writeb,
2156 };
2157
2158 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2159 {
2160 unsigned long ram_addr;
2161 int dirty_flags;
2162 ram_addr = addr - (unsigned long)phys_ram_base;
2163 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2164 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2165 #if !defined(CONFIG_USER_ONLY)
2166 tb_invalidate_phys_page_fast(ram_addr, 1);
2167 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2168 #endif
2169 }
2170 stb_p((uint8_t *)(long)addr, val);
2171 #ifdef USE_KQEMU
2172 if (cpu_single_env->kqemu_enabled &&
2173 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2174 kqemu_modify_page(cpu_single_env, ram_addr);
2175 #endif
2176 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2177 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2178 /* we remove the notdirty callback only if the code has been
2179 flushed */
2180 if (dirty_flags == 0xff)
2181 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2182 }
2183
2184 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2185 {
2186 unsigned long ram_addr;
2187 int dirty_flags;
2188 ram_addr = addr - (unsigned long)phys_ram_base;
2189 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2190 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2191 #if !defined(CONFIG_USER_ONLY)
2192 tb_invalidate_phys_page_fast(ram_addr, 2);
2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2194 #endif
2195 }
2196 stw_p((uint8_t *)(long)addr, val);
2197 #ifdef USE_KQEMU
2198 if (cpu_single_env->kqemu_enabled &&
2199 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2200 kqemu_modify_page(cpu_single_env, ram_addr);
2201 #endif
2202 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2203 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2204 /* we remove the notdirty callback only if the code has been
2205 flushed */
2206 if (dirty_flags == 0xff)
2207 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2208 }
2209
2210 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2211 {
2212 unsigned long ram_addr;
2213 int dirty_flags;
2214 ram_addr = addr - (unsigned long)phys_ram_base;
2215 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2216 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2217 #if !defined(CONFIG_USER_ONLY)
2218 tb_invalidate_phys_page_fast(ram_addr, 4);
2219 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2220 #endif
2221 }
2222 stl_p((uint8_t *)(long)addr, val);
2223 #ifdef USE_KQEMU
2224 if (cpu_single_env->kqemu_enabled &&
2225 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2226 kqemu_modify_page(cpu_single_env, ram_addr);
2227 #endif
2228 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2229 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2230 /* we remove the notdirty callback only if the code has been
2231 flushed */
2232 if (dirty_flags == 0xff)
2233 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2234 }
2235
2236 static CPUReadMemoryFunc *error_mem_read[3] = {
2237 NULL, /* never used */
2238 NULL, /* never used */
2239 NULL, /* never used */
2240 };
2241
2242 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2243 notdirty_mem_writeb,
2244 notdirty_mem_writew,
2245 notdirty_mem_writel,
2246 };
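/* Illustrative sketch of how the per-page dirty byte maintained by the
   notdirty handlers above is consumed elsewhere: phys_ram_dirty[] holds one
   bitmask byte per RAM page, with 0xff meaning "dirty for every client".
   The flag parameter is an assumption made for the example, which is kept
   under #if 0. */
#if 0
static int example_page_is_dirty(ram_addr_t ram_addr, int dirty_flag)
{
    return (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & dirty_flag) != 0;
}
#endif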
2247
2248 #if defined(CONFIG_SOFTMMU)
2249 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2250 so these check for a hit then pass through to the normal out-of-line
2251 phys routines. */
2252 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2253 {
2254 return ldub_phys(addr);
2255 }
2256
2257 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2258 {
2259 return lduw_phys(addr);
2260 }
2261
2262 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2263 {
2264 return ldl_phys(addr);
2265 }
2266
2267 /* Generate a debug exception if a watchpoint has been hit.
2268 Returns the real physical address of the access. addr will be a host
2269 address in case of a RAM location. */
2270 static target_ulong check_watchpoint(target_phys_addr_t addr)
2271 {
2272 CPUState *env = cpu_single_env;
2273 target_ulong watch;
2274 target_ulong retaddr;
2275 int i;
2276
2277 retaddr = addr;
2278 for (i = 0; i < env->nb_watchpoints; i++) {
2279 watch = env->watchpoint[i].vaddr;
2280 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2281 retaddr = addr - env->watchpoint[i].addend;
2282 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2283 cpu_single_env->watchpoint_hit = i + 1;
2284 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2285 break;
2286 }
2287 }
2288 }
2289 return retaddr;
2290 }
2291
2292 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2293 uint32_t val)
2294 {
2295 addr = check_watchpoint(addr);
2296 stb_phys(addr, val);
2297 }
2298
2299 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2300 uint32_t val)
2301 {
2302 addr = check_watchpoint(addr);
2303 stw_phys(addr, val);
2304 }
2305
2306 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2307 uint32_t val)
2308 {
2309 addr = check_watchpoint(addr);
2310 stl_phys(addr, val);
2311 }
2312
2313 static CPUReadMemoryFunc *watch_mem_read[3] = {
2314 watch_mem_readb,
2315 watch_mem_readw,
2316 watch_mem_readl,
2317 };
2318
2319 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2320 watch_mem_writeb,
2321 watch_mem_writew,
2322 watch_mem_writel,
2323 };
2324 #endif
2325
2326 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2327 unsigned int len)
2328 {
2329 uint32_t ret;
2330 unsigned int idx;
2331
2332 idx = SUBPAGE_IDX(addr - mmio->base);
2333 #if defined(DEBUG_SUBPAGE)
2334 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2335 mmio, len, addr, idx);
2336 #endif
2337 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2338
2339 return ret;
2340 }
2341
2342 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2343 uint32_t value, unsigned int len)
2344 {
2345 unsigned int idx;
2346
2347 idx = SUBPAGE_IDX(addr - mmio->base);
2348 #if defined(DEBUG_SUBPAGE)
2349 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2350 mmio, len, addr, idx, value);
2351 #endif
2352 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2353 }
2354
2355 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2356 {
2357 #if defined(DEBUG_SUBPAGE)
2358 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2359 #endif
2360
2361 return subpage_readlen(opaque, addr, 0);
2362 }
2363
2364 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2365 uint32_t value)
2366 {
2367 #if defined(DEBUG_SUBPAGE)
2368 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2369 #endif
2370 subpage_writelen(opaque, addr, value, 0);
2371 }
2372
2373 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2374 {
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377 #endif
2378
2379 return subpage_readlen(opaque, addr, 1);
2380 }
2381
2382 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2383 uint32_t value)
2384 {
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387 #endif
2388 subpage_writelen(opaque, addr, value, 1);
2389 }
2390
2391 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2392 {
2393 #if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395 #endif
2396
2397 return subpage_readlen(opaque, addr, 2);
2398 }
2399
2400 static void subpage_writel (void *opaque,
2401 target_phys_addr_t addr, uint32_t value)
2402 {
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405 #endif
2406 subpage_writelen(opaque, addr, value, 2);
2407 }
2408
2409 static CPUReadMemoryFunc *subpage_read[] = {
2410 &subpage_readb,
2411 &subpage_readw,
2412 &subpage_readl,
2413 };
2414
2415 static CPUWriteMemoryFunc *subpage_write[] = {
2416 &subpage_writeb,
2417 &subpage_writew,
2418 &subpage_writel,
2419 };
2420
2421 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2422 int memory)
2423 {
2424 int idx, eidx;
2425 unsigned int i;
2426
2427 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2428 return -1;
2429 idx = SUBPAGE_IDX(start);
2430 eidx = SUBPAGE_IDX(end);
2431 #if defined(DEBUG_SUBPAGE)
2432 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2433 mmio, start, end, idx, eidx, memory);
2434 #endif
2435 memory >>= IO_MEM_SHIFT;
2436 for (; idx <= eidx; idx++) {
2437 for (i = 0; i < 4; i++) {
2438 if (io_mem_read[memory][i]) {
2439 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2440 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2441 }
2442 if (io_mem_write[memory][i]) {
2443 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2444 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2445 }
2446 }
2447 }
2448
2449 return 0;
2450 }
2451
2452 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2453 int orig_memory)
2454 {
2455 subpage_t *mmio;
2456 int subpage_memory;
2457
2458 mmio = qemu_mallocz(sizeof(subpage_t));
2459 if (mmio != NULL) {
2460 mmio->base = base;
2461 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2462 #if defined(DEBUG_SUBPAGE)
2463 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2464 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2465 #endif
2466 *phys = subpage_memory | IO_MEM_SUBPAGE;
2467 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2468 }
2469
2470 return mmio;
2471 }
2472
2473 static void io_mem_init(void)
2474 {
2475 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2476 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2477 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2478 io_mem_nb = 5;
2479
2480 #if defined(CONFIG_SOFTMMU)
2481 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2482 watch_mem_write, NULL);
2483 #endif
2484 /* alloc dirty bits array */
2485 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2486 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2487 }
2488
2489 /* mem_read and mem_write are arrays of functions containing the
2490 function to access byte (index 0), word (index 1) and dword (index
2491 2). Functions can be omitted with a NULL function pointer. The
2492 registered functions may be modified dynamically later.
2493 If io_index is non-zero, the corresponding I/O zone is
2494 modified. If it is zero, a new I/O zone is allocated. The return
2495 value can be used with cpu_register_physical_memory(); -1 is
2496 returned on error. */
2497 int cpu_register_io_memory(int io_index,
2498 CPUReadMemoryFunc **mem_read,
2499 CPUWriteMemoryFunc **mem_write,
2500 void *opaque)
2501 {
2502 int i, subwidth = 0;
2503
2504 if (io_index <= 0) {
2505 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2506 return -1;
2507 io_index = io_mem_nb++;
2508 } else {
2509 if (io_index >= IO_MEM_NB_ENTRIES)
2510 return -1;
2511 }
2512
2513 for(i = 0;i < 3; i++) {
2514 if (!mem_read[i] || !mem_write[i])
2515 subwidth = IO_MEM_SUBWIDTH;
2516 io_mem_read[io_index][i] = mem_read[i];
2517 io_mem_write[io_index][i] = mem_write[i];
2518 }
2519 io_mem_opaque[io_index] = opaque;
2520 return (io_index << IO_MEM_SHIFT) | subwidth;
2521 }
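/* Illustrative sketch of registering MMIO callbacks for a hypothetical
   device and mapping them into the guest physical address space; the
   "mydev" names and the 0x20000000 base are assumptions made for this
   example, which is kept under #if 0. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                       /* decode device registers from addr here */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update the device state pointed to by opaque */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL, NULL, mydev_readl,        /* only 32 bit accesses are implemented */
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void example_map_mydev(void *dev_state)
{
    int io;

    /* io_index 0 asks for a fresh slot; because of the NULL entries above,
       the returned value also carries IO_MEM_SUBWIDTH */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif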
2522
2523 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2524 {
2525 return io_mem_write[io_index >> IO_MEM_SHIFT];
2526 }
2527
2528 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2529 {
2530 return io_mem_read[io_index >> IO_MEM_SHIFT];
2531 }
2532
2533 /* physical memory access (slow version, mainly for debug) */
2534 #if defined(CONFIG_USER_ONLY)
2535 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2536 int len, int is_write)
2537 {
2538 int l, flags;
2539 target_ulong page;
2540 void * p;
2541
2542 while (len > 0) {
2543 page = addr & TARGET_PAGE_MASK;
2544 l = (page + TARGET_PAGE_SIZE) - addr;
2545 if (l > len)
2546 l = len;
2547 flags = page_get_flags(page);
2548 if (!(flags & PAGE_VALID))
2549 return;
2550 if (is_write) {
2551 if (!(flags & PAGE_WRITE))
2552 return;
2553 /* XXX: this code should not depend on lock_user */
2554 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2555 /* FIXME - should this return an error rather than just fail? */
2556 return;
2557 memcpy(p, buf, l);
2558 unlock_user(p, addr, l);
2559 } else {
2560 if (!(flags & PAGE_READ))
2561 return;
2562 /* XXX: this code should not depend on lock_user */
2563 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2564 /* FIXME - should this return an error rather than just fail? */
2565 return;
2566 memcpy(buf, p, l);
2567 unlock_user(p, addr, 0);
2568 }
2569 len -= l;
2570 buf += l;
2571 addr += l;
2572 }
2573 }
2574
2575 #else
2576 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2577 int len, int is_write)
2578 {
2579 int l, io_index;
2580 uint8_t *ptr;
2581 uint32_t val;
2582 target_phys_addr_t page;
2583 unsigned long pd;
2584 PhysPageDesc *p;
2585
2586 while (len > 0) {
2587 page = addr & TARGET_PAGE_MASK;
2588 l = (page + TARGET_PAGE_SIZE) - addr;
2589 if (l > len)
2590 l = len;
2591 p = phys_page_find(page >> TARGET_PAGE_BITS);
2592 if (!p) {
2593 pd = IO_MEM_UNASSIGNED;
2594 } else {
2595 pd = p->phys_offset;
2596 }
2597
2598 if (is_write) {
2599 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2600 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2601 /* XXX: could force cpu_single_env to NULL to avoid
2602 potential bugs */
2603 if (l >= 4 && ((addr & 3) == 0)) {
2604 /* 32 bit write access */
2605 val = ldl_p(buf);
2606 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2607 l = 4;
2608 } else if (l >= 2 && ((addr & 1) == 0)) {
2609 /* 16 bit write access */
2610 val = lduw_p(buf);
2611 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2612 l = 2;
2613 } else {
2614 /* 8 bit write access */
2615 val = ldub_p(buf);
2616 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2617 l = 1;
2618 }
2619 } else {
2620 unsigned long addr1;
2621 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2622 /* RAM case */
2623 ptr = phys_ram_base + addr1;
2624 memcpy(ptr, buf, l);
2625 if (!cpu_physical_memory_is_dirty(addr1)) {
2626 /* invalidate code */
2627 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2628 /* set dirty bit */
2629 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2630 (0xff & ~CODE_DIRTY_FLAG);
2631 }
2632 }
2633 } else {
2634 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2635 !(pd & IO_MEM_ROMD)) {
2636 /* I/O case */
2637 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2638 if (l >= 4 && ((addr & 3) == 0)) {
2639 /* 32 bit read access */
2640 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2641 stl_p(buf, val);
2642 l = 4;
2643 } else if (l >= 2 && ((addr & 1) == 0)) {
2644 /* 16 bit read access */
2645 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2646 stw_p(buf, val);
2647 l = 2;
2648 } else {
2649 /* 8 bit read access */
2650 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2651 stb_p(buf, val);
2652 l = 1;
2653 }
2654 } else {
2655 /* RAM case */
2656 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2657 (addr & ~TARGET_PAGE_MASK);
2658 memcpy(buf, ptr, l);
2659 }
2660 }
2661 len -= l;
2662 buf += l;
2663 addr += l;
2664 }
2665 }
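/* Illustrative sketch of a device model doing DMA through the slow path
   above; the buffer contents and the 0x1000 guest physical address are made
   up for the example, which is kept under #if 0. */
#if 0
static void example_dma_roundtrip(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    cpu_physical_memory_write(0x1000, out, sizeof(out));   /* host -> guest */
    cpu_physical_memory_read(0x1000, in, sizeof(in));      /* guest -> host */
}
#endif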
2666
2667 /* used for ROM loading: can write in RAM and ROM */
2668 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2669 const uint8_t *buf, int len)
2670 {
2671 int l;
2672 uint8_t *ptr;
2673 target_phys_addr_t page;
2674 unsigned long pd;
2675 PhysPageDesc *p;
2676
2677 while (len > 0) {
2678 page = addr & TARGET_PAGE_MASK;
2679 l = (page + TARGET_PAGE_SIZE) - addr;
2680 if (l > len)
2681 l = len;
2682 p = phys_page_find(page >> TARGET_PAGE_BITS);
2683 if (!p) {
2684 pd = IO_MEM_UNASSIGNED;
2685 } else {
2686 pd = p->phys_offset;
2687 }
2688
2689 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2690 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2691 !(pd & IO_MEM_ROMD)) {
2692 /* do nothing */
2693 } else {
2694 unsigned long addr1;
2695 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2696 /* ROM/RAM case */
2697 ptr = phys_ram_base + addr1;
2698 memcpy(ptr, buf, l);
2699 }
2700 len -= l;
2701 buf += l;
2702 addr += l;
2703 }
2704 }
2705
2706
2707 /* warning: addr must be aligned */
2708 uint32_t ldl_phys(target_phys_addr_t addr)
2709 {
2710 int io_index;
2711 uint8_t *ptr;
2712 uint32_t val;
2713 unsigned long pd;
2714 PhysPageDesc *p;
2715
2716 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2717 if (!p) {
2718 pd = IO_MEM_UNASSIGNED;
2719 } else {
2720 pd = p->phys_offset;
2721 }
2722
2723 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2724 !(pd & IO_MEM_ROMD)) {
2725 /* I/O case */
2726 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2727 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2728 } else {
2729 /* RAM case */
2730 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2731 (addr & ~TARGET_PAGE_MASK);
2732 val = ldl_p(ptr);
2733 }
2734 return val;
2735 }
2736
2737 /* warning: addr must be aligned */
2738 uint64_t ldq_phys(target_phys_addr_t addr)
2739 {
2740 int io_index;
2741 uint8_t *ptr;
2742 uint64_t val;
2743 unsigned long pd;
2744 PhysPageDesc *p;
2745
2746 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2747 if (!p) {
2748 pd = IO_MEM_UNASSIGNED;
2749 } else {
2750 pd = p->phys_offset;
2751 }
2752
2753 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2754 !(pd & IO_MEM_ROMD)) {
2755 /* I/O case */
2756 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2757 #ifdef TARGET_WORDS_BIGENDIAN
2758 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2759 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2760 #else
2761 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2762 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2763 #endif
2764 } else {
2765 /* RAM case */
2766 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2767 (addr & ~TARGET_PAGE_MASK);
2768 val = ldq_p(ptr);
2769 }
2770 return val;
2771 }
2772
2773 /* XXX: optimize */
2774 uint32_t ldub_phys(target_phys_addr_t addr)
2775 {
2776 uint8_t val;
2777 cpu_physical_memory_read(addr, &val, 1);
2778 return val;
2779 }
2780
2781 /* XXX: optimize */
2782 uint32_t lduw_phys(target_phys_addr_t addr)
2783 {
2784 uint16_t val;
2785 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2786 return tswap16(val);
2787 }
2788
2789 /* warning: addr must be aligned. The ram page is not marked as dirty
2790 and the code inside is not invalidated. It is useful if the dirty
2791 bits are used to track modified PTEs */
2792 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2793 {
2794 int io_index;
2795 uint8_t *ptr;
2796 unsigned long pd;
2797 PhysPageDesc *p;
2798
2799 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2800 if (!p) {
2801 pd = IO_MEM_UNASSIGNED;
2802 } else {
2803 pd = p->phys_offset;
2804 }
2805
2806 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2807 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2808 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2809 } else {
2810 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2811 (addr & ~TARGET_PAGE_MASK);
2812 stl_p(ptr, val);
2813 }
2814 }
2815
2816 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2817 {
2818 int io_index;
2819 uint8_t *ptr;
2820 unsigned long pd;
2821 PhysPageDesc *p;
2822
2823 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2824 if (!p) {
2825 pd = IO_MEM_UNASSIGNED;
2826 } else {
2827 pd = p->phys_offset;
2828 }
2829
2830 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2831 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2832 #ifdef TARGET_WORDS_BIGENDIAN
2833 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2834 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2835 #else
2836 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2837 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2838 #endif
2839 } else {
2840 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2841 (addr & ~TARGET_PAGE_MASK);
2842 stq_p(ptr, val);
2843 }
2844 }
2845
2846 /* warning: addr must be aligned */
2847 void stl_phys(target_phys_addr_t addr, uint32_t val)
2848 {
2849 int io_index;
2850 uint8_t *ptr;
2851 unsigned long pd;
2852 PhysPageDesc *p;
2853
2854 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2855 if (!p) {
2856 pd = IO_MEM_UNASSIGNED;
2857 } else {
2858 pd = p->phys_offset;
2859 }
2860
2861 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2862 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2863 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2864 } else {
2865 unsigned long addr1;
2866 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2867 /* RAM case */
2868 ptr = phys_ram_base + addr1;
2869 stl_p(ptr, val);
2870 if (!cpu_physical_memory_is_dirty(addr1)) {
2871 /* invalidate code */
2872 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2873 /* set dirty bit */
2874 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2875 (0xff & ~CODE_DIRTY_FLAG);
2876 }
2877 }
2878 }
2879
2880 /* XXX: optimize */
2881 void stb_phys(target_phys_addr_t addr, uint32_t val)
2882 {
2883 uint8_t v = val;
2884 cpu_physical_memory_write(addr, &v, 1);
2885 }
2886
2887 /* XXX: optimize */
2888 void stw_phys(target_phys_addr_t addr, uint32_t val)
2889 {
2890 uint16_t v = tswap16(val);
2891 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2892 }
2893
2894 /* XXX: optimize */
2895 void stq_phys(target_phys_addr_t addr, uint64_t val)
2896 {
2897 val = tswap64(val);
2898 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2899 }
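/* Illustrative sketch: the helpers above give word-sized, byte-order aware
   access to guest physical memory, e.g. for walking guest page tables; the
   address is made up for the example, which is kept under #if 0. */
#if 0
static void example_patch_guest_word(void)
{
    uint32_t v;

    v = ldl_phys(0x2000);         /* read a 32 bit word from guest RAM/MMIO */
    stl_phys(0x2000, v | 1);      /* write it back through the dirty-tracking path */
}
#endif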
2900
2901 #endif
2902
2903 /* virtual memory access for debug */
2904 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2905 uint8_t *buf, int len, int is_write)
2906 {
2907 int l;
2908 target_phys_addr_t phys_addr;
2909 target_ulong page;
2910
2911 while (len > 0) {
2912 page = addr & TARGET_PAGE_MASK;
2913 phys_addr = cpu_get_phys_page_debug(env, page);
2914 /* if no physical page mapped, return an error */
2915 if (phys_addr == -1)
2916 return -1;
2917 l = (page + TARGET_PAGE_SIZE) - addr;
2918 if (l > len)
2919 l = len;
2920 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2921 buf, l, is_write);
2922 len -= l;
2923 buf += l;
2924 addr += l;
2925 }
2926 return 0;
2927 }
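/* Illustrative sketch: a debugger stub can read guest virtual memory through
   the helper above; the CPU pointer and the guest virtual address are assumed
   to come from the debug session.  Kept under #if 0. */
#if 0
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif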
2928
2929 void dump_exec_info(FILE *f,
2930 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2931 {
2932 int i, target_code_size, max_target_code_size;
2933 int direct_jmp_count, direct_jmp2_count, cross_page;
2934 TranslationBlock *tb;
2935
2936 target_code_size = 0;
2937 max_target_code_size = 0;
2938 cross_page = 0;
2939 direct_jmp_count = 0;
2940 direct_jmp2_count = 0;
2941 for(i = 0; i < nb_tbs; i++) {
2942 tb = &tbs[i];
2943 target_code_size += tb->size;
2944 if (tb->size > max_target_code_size)
2945 max_target_code_size = tb->size;
2946 if (tb->page_addr[1] != -1)
2947 cross_page++;
2948 if (tb->tb_next_offset[0] != 0xffff) {
2949 direct_jmp_count++;
2950 if (tb->tb_next_offset[1] != 0xffff) {
2951 direct_jmp2_count++;
2952 }
2953 }
2954 }
2955 /* XXX: avoid using doubles? */
2956 cpu_fprintf(f, "Translation buffer state:\n");
2957 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2958 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2959 nb_tbs ? target_code_size / nb_tbs : 0,
2960 max_target_code_size);
2961 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2962 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2963 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2964 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2965 cross_page,
2966 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2967 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2968 direct_jmp_count,
2969 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2970 direct_jmp2_count,
2971 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2972 cpu_fprintf(f, "\nStatistics:\n");
2973 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2974 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2975 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2976 #ifdef CONFIG_PROFILER
2977 {
2978 int64_t tot;
2979 tot = dyngen_interm_time + dyngen_code_time;
2980 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2981 tot, tot / 2.4e9);
2982 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2983 dyngen_tb_count,
2984 dyngen_tb_count1 - dyngen_tb_count,
2985 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
2986 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2987 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
2988 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
2989 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
2990 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2991 dyngen_tb_count ?
2992 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
2993 cpu_fprintf(f, "cycles/op %0.1f\n",
2994 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
2995 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2996 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
2997 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2998 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
2999 if (tot == 0)
3000 tot = 1;
3001 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3002 (double)dyngen_interm_time / tot * 100.0);
3003 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3004 (double)dyngen_code_time / tot * 100.0);
3005 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3006 dyngen_restore_count);
3007 cpu_fprintf(f, " avg cycles %0.1f\n",
3008 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3009 {
3010 extern void dump_op_count(void);
3011 dump_op_count();
3012 }
3013 }
3014 #endif
3015 }
3016
3017 #if !defined(CONFIG_USER_ONLY)
3018
3019 #define MMUSUFFIX _cmmu
3020 #define GETPC() NULL
3021 #define env cpu_single_env
3022 #define SOFTMMU_CODE_ACCESS
3023
3024 #define SHIFT 0
3025 #include "softmmu_template.h"
3026
3027 #define SHIFT 1
3028 #include "softmmu_template.h"
3029
3030 #define SHIFT 2
3031 #include "softmmu_template.h"
3032
3033 #define SHIFT 3
3034 #include "softmmu_template.h"
3035
3036 #undef env
3037
3038 #endif