1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
43
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
48
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
52
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
55
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
60
61 #define SMC_BITMAP_USE_THRESHOLD 10
62
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
65
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
83
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
97
98 ram_addr_t phys_ram_size;
99 int phys_ram_fd;
100 uint8_t *phys_ram_base;
101 uint8_t *phys_ram_dirty;
102 static ram_addr_t phys_ram_alloc_offset = 0;
103
104 CPUState *first_cpu;
105 /* current CPU in the current thread. It is only valid inside
106 cpu_exec() */
107 CPUState *cpu_single_env;
108
109 typedef struct PageDesc {
110 /* list of TBs intersecting this ram page */
111 TranslationBlock *first_tb;
112 /* in order to optimize self modifying code, we count the number
113 of lookups we do to a given page to use a bitmap */
114 unsigned int code_write_count;
115 uint8_t *code_bitmap;
116 #if defined(CONFIG_USER_ONLY)
117 unsigned long flags;
118 #endif
119 } PageDesc;
120
121 typedef struct PhysPageDesc {
122 /* offset in host memory of the page + io_index in the low 12 bits */
123 ram_addr_t phys_offset;
124 } PhysPageDesc;
125
126 #define L2_BITS 10
127 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128 /* XXX: this is a temporary hack for alpha target.
129 * In the future, this is to be replaced by a multi-level table
130 * to actually be able to handle the complete 64 bits address space.
131 */
132 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
133 #else
134 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135 #endif
136
137 #define L1_SIZE (1 << L1_BITS)
138 #define L2_SIZE (1 << L2_BITS)
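/* A worked example of the split above, assuming a 4 KiB target page
   (TARGET_PAGE_BITS == 12) and the plain 32 bit case: L1_BITS comes out
   as 32 - 10 - 12 = 10, so both levels hold 1024 entries and a page
   index decomposes as
       l1_slot = index >> L2_BITS;        (top L1_BITS bits)
       l2_slot = index & (L2_SIZE - 1);   (low L2_BITS bits)
   which is exactly how page_find() and page_find_alloc() below walk the
   table. */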
139
140 static void io_mem_init(void);
141
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
146
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
150
151 /* io memory support */
152 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
155 static int io_mem_nb;
156 #if defined(CONFIG_SOFTMMU)
157 static int io_mem_watch;
158 #endif
159
160 /* log support */
161 char *logfilename = "/tmp/qemu.log";
162 FILE *logfile;
163 int loglevel;
164 static int log_append = 0;
165
166 /* statistics */
167 static int tlb_flush_count;
168 static int tb_flush_count;
169 static int tb_phys_invalidate_count;
170
171 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
172 typedef struct subpage_t {
173 target_phys_addr_t base;
174 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
175 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
176 void *opaque[TARGET_PAGE_SIZE][2][4];
177 } subpage_t;
178
179 #ifdef _WIN32
180 static void map_exec(void *addr, long size)
181 {
182 DWORD old_protect;
183 VirtualProtect(addr, size,
184 PAGE_EXECUTE_READWRITE, &old_protect);
185
186 }
187 #else
188 static void map_exec(void *addr, long size)
189 {
190 unsigned long start, end, page_size;
191
192 page_size = getpagesize();
193 start = (unsigned long)addr;
194 start &= ~(page_size - 1);
195
196 end = (unsigned long)addr + size;
197 end += page_size - 1;
198 end &= ~(page_size - 1);
199
200 mprotect((void *)start, end - start,
201 PROT_READ | PROT_WRITE | PROT_EXEC);
202 }
203 #endif
204
205 static void page_init(void)
206 {
207 /* NOTE: we can always suppose that qemu_host_page_size >=
208 TARGET_PAGE_SIZE */
209 #ifdef _WIN32
210 {
211 SYSTEM_INFO system_info;
212 DWORD old_protect;
213
214 GetSystemInfo(&system_info);
215 qemu_real_host_page_size = system_info.dwPageSize;
216 }
217 #else
218 qemu_real_host_page_size = getpagesize();
219 #endif
220 if (qemu_host_page_size == 0)
221 qemu_host_page_size = qemu_real_host_page_size;
222 if (qemu_host_page_size < TARGET_PAGE_SIZE)
223 qemu_host_page_size = TARGET_PAGE_SIZE;
224 qemu_host_page_bits = 0;
225 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
226 qemu_host_page_bits++;
227 qemu_host_page_mask = ~(qemu_host_page_size - 1);
228 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
229 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230
231 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 {
233 long long startaddr, endaddr;
234 FILE *f;
235 int n;
236
237 f = fopen("/proc/self/maps", "r");
238 if (f) {
239 do {
240 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
241 if (n == 2) {
242 startaddr = MIN(startaddr,
243 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
244 endaddr = MIN(endaddr,
245 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
246 page_set_flags(startaddr & TARGET_PAGE_MASK,
247 TARGET_PAGE_ALIGN(endaddr),
248 PAGE_RESERVED);
249 }
250 } while (!feof(f));
251 fclose(f);
252 }
253 }
254 #endif
255 }
256
257 static inline PageDesc *page_find_alloc(target_ulong index)
258 {
259 PageDesc **lp, *p;
260
261 lp = &l1_map[index >> L2_BITS];
262 p = *lp;
263 if (!p) {
264 /* allocate if not found */
265 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
266 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
267 *lp = p;
268 }
269 return p + (index & (L2_SIZE - 1));
270 }
271
272 static inline PageDesc *page_find(target_ulong index)
273 {
274 PageDesc *p;
275
276 p = l1_map[index >> L2_BITS];
277 if (!p)
278     return NULL;
279 return p + (index & (L2_SIZE - 1));
280 }
281
282 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
283 {
284 void **lp, **p;
285 PhysPageDesc *pd;
286
287 p = (void **)l1_phys_map;
288 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
289
290 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
291 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
292 #endif
293 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
294 p = *lp;
295 if (!p) {
296 /* allocate if not found */
297 if (!alloc)
298 return NULL;
299 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
300 memset(p, 0, sizeof(void *) * L1_SIZE);
301 *lp = p;
302 }
303 #endif
304 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
305 pd = *lp;
306 if (!pd) {
307 int i;
308 /* allocate if not found */
309 if (!alloc)
310 return NULL;
311 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
312 *lp = pd;
313 for (i = 0; i < L2_SIZE; i++)
314 pd[i].phys_offset = IO_MEM_UNASSIGNED;
315 }
316 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
317 }
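/* Note on the lookup above: when TARGET_PHYS_ADDR_SPACE_BITS > 32 the
   physical map gains one extra indirection level, so a physical page
   index is split into three fields: the bits above (L1_BITS + L2_BITS)
   select a slot in l1_phys_map, the next L1_BITS bits select a second
   level table, and the low L2_BITS bits select the PhysPageDesc entry.
   For a 32 bit physical address space the first step is compiled out
   and l1_phys_map points directly at PhysPageDesc arrays. */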
318
319 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
320 {
321 return phys_page_find_alloc(index, 0);
322 }
323
324 #if !defined(CONFIG_USER_ONLY)
325 static void tlb_protect_code(ram_addr_t ram_addr);
326 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
327 target_ulong vaddr);
328 #endif
329
330 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
331
332 #if defined(CONFIG_USER_ONLY)
333 /* Currently it is not recommended to allocate big chunks of data in
334    user mode. This will change when a dedicated libc is used. */
335 #define USE_STATIC_CODE_GEN_BUFFER
336 #endif
337
338 #ifdef USE_STATIC_CODE_GEN_BUFFER
339 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
340 #endif
341
342 void code_gen_alloc(unsigned long tb_size)
343 {
344 #ifdef USE_STATIC_CODE_GEN_BUFFER
345 code_gen_buffer = static_code_gen_buffer;
346 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
347 map_exec(code_gen_buffer, code_gen_buffer_size);
348 #else
349 code_gen_buffer_size = tb_size;
350 if (code_gen_buffer_size == 0) {
351 #if defined(CONFIG_USER_ONLY)
352 /* in user mode, phys_ram_size is not meaningful */
353 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
354 #else
355         /* XXX: needs adjustments */
356 code_gen_buffer_size = (int)(phys_ram_size / 4);
357 #endif
358 }
359 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
360 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
361 /* The code gen buffer location may have constraints depending on
362 the host cpu and OS */
363 #if defined(__linux__)
364 {
365 int flags;
366 flags = MAP_PRIVATE | MAP_ANONYMOUS;
367 #if defined(__x86_64__)
368 flags |= MAP_32BIT;
369 /* Cannot map more than that */
370 if (code_gen_buffer_size > (800 * 1024 * 1024))
371 code_gen_buffer_size = (800 * 1024 * 1024);
372 #endif
373 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
374 PROT_WRITE | PROT_READ | PROT_EXEC,
375 flags, -1, 0);
376 if (code_gen_buffer == MAP_FAILED) {
377 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
378 exit(1);
379 }
380 }
381 #else
382 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
383 if (!code_gen_buffer) {
384 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
385 exit(1);
386 }
387 map_exec(code_gen_buffer, code_gen_buffer_size);
388 #endif
389 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
390 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
391 code_gen_buffer_max_size = code_gen_buffer_size -
392 code_gen_max_block_size();
393 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
394 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
395 }
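/* Sizing rationale (summary, not spelled out in the original comments):
   code_gen_buffer_max_size keeps one worst case block of headroom below
   the end of the buffer, so a translation started just under the
   threshold can never overrun it, while code_gen_max_blocks bounds the
   tbs[] array using the average block size.  tb_alloc() checks both
   limits and returns NULL, which makes its callers trigger a tb_flush(). */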
396
397 /* Must be called before using the QEMU cpus. 'tb_size' is the size
398 (in bytes) allocated to the translation buffer. Zero means default
399 size. */
400 void cpu_exec_init_all(unsigned long tb_size)
401 {
402 cpu_gen_init();
403 code_gen_alloc(tb_size);
404 code_gen_ptr = code_gen_buffer;
405 page_init();
406 io_mem_init();
407 }
408
409 void cpu_exec_init(CPUState *env)
410 {
411 CPUState **penv;
412 int cpu_index;
413
414 env->next_cpu = NULL;
415 penv = &first_cpu;
416 cpu_index = 0;
417 while (*penv != NULL) {
418 penv = (CPUState **)&(*penv)->next_cpu;
419 cpu_index++;
420 }
421 env->cpu_index = cpu_index;
422 env->nb_watchpoints = 0;
423 *penv = env;
424 }
425
426 static inline void invalidate_page_bitmap(PageDesc *p)
427 {
428 if (p->code_bitmap) {
429 qemu_free(p->code_bitmap);
430 p->code_bitmap = NULL;
431 }
432 p->code_write_count = 0;
433 }
434
435 /* set to NULL all the 'first_tb' fields in all PageDescs */
436 static void page_flush_tb(void)
437 {
438 int i, j;
439 PageDesc *p;
440
441 for(i = 0; i < L1_SIZE; i++) {
442 p = l1_map[i];
443 if (p) {
444 for(j = 0; j < L2_SIZE; j++) {
445 p->first_tb = NULL;
446 invalidate_page_bitmap(p);
447 p++;
448 }
449 }
450 }
451 }
452
453 /* flush all the translation blocks */
454 /* XXX: tb_flush is currently not thread safe */
455 void tb_flush(CPUState *env1)
456 {
457 CPUState *env;
458 #if defined(DEBUG_FLUSH)
459 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
460 (unsigned long)(code_gen_ptr - code_gen_buffer),
461 nb_tbs, nb_tbs > 0 ?
462 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
463 #endif
464 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
465 cpu_abort(env1, "Internal error: code buffer overflow\n");
466
467 nb_tbs = 0;
468
469 for(env = first_cpu; env != NULL; env = env->next_cpu) {
470 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
471 }
472
473 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
474 page_flush_tb();
475
476 code_gen_ptr = code_gen_buffer;
477 /* XXX: flush processor icache at this point if cache flush is
478 expensive */
479 tb_flush_count++;
480 }
481
482 #ifdef DEBUG_TB_CHECK
483
484 static void tb_invalidate_check(target_ulong address)
485 {
486 TranslationBlock *tb;
487 int i;
488 address &= TARGET_PAGE_MASK;
489 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
490 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
491 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
492 address >= tb->pc + tb->size)) {
493 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
494 address, (long)tb->pc, tb->size);
495 }
496 }
497 }
498 }
499
500 /* verify that all the pages have correct rights for code */
501 static void tb_page_check(void)
502 {
503 TranslationBlock *tb;
504 int i, flags1, flags2;
505
506 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
507 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
508 flags1 = page_get_flags(tb->pc);
509 flags2 = page_get_flags(tb->pc + tb->size - 1);
510 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
511 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
512 (long)tb->pc, tb->size, flags1, flags2);
513 }
514 }
515 }
516 }
517
518 void tb_jmp_check(TranslationBlock *tb)
519 {
520 TranslationBlock *tb1;
521 unsigned int n1;
522
523 /* suppress any remaining jumps to this TB */
524 tb1 = tb->jmp_first;
525 for(;;) {
526 n1 = (long)tb1 & 3;
527 tb1 = (TranslationBlock *)((long)tb1 & ~3);
528 if (n1 == 2)
529 break;
530 tb1 = tb1->jmp_next[n1];
531 }
532 /* check end of list */
533 if (tb1 != tb) {
534 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
535 }
536 }
537
538 #endif
539
540 /* invalidate one TB */
541 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
542 int next_offset)
543 {
544 TranslationBlock *tb1;
545 for(;;) {
546 tb1 = *ptb;
547 if (tb1 == tb) {
548 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
549 break;
550 }
551 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
552 }
553 }
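/* The list walkers below rely on a tagged pointer convention: pointers
   stored in page_next[], jmp_next[] and jmp_first carry a small index in
   their two low bits -- 0 or 1 names which slot of the pointed-to TB
   continues the list, and 2 marks the head/end of a jump list (see
   tb->jmp_first = (TranslationBlock *)((long)tb | 2) in tb_link_phys()).
   Masking with ~3 recovers the real TranslationBlock pointer. */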
554
555 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
556 {
557 TranslationBlock *tb1;
558 unsigned int n1;
559
560 for(;;) {
561 tb1 = *ptb;
562 n1 = (long)tb1 & 3;
563 tb1 = (TranslationBlock *)((long)tb1 & ~3);
564 if (tb1 == tb) {
565 *ptb = tb1->page_next[n1];
566 break;
567 }
568 ptb = &tb1->page_next[n1];
569 }
570 }
571
572 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
573 {
574 TranslationBlock *tb1, **ptb;
575 unsigned int n1;
576
577 ptb = &tb->jmp_next[n];
578 tb1 = *ptb;
579 if (tb1) {
580 /* find tb(n) in circular list */
581 for(;;) {
582 tb1 = *ptb;
583 n1 = (long)tb1 & 3;
584 tb1 = (TranslationBlock *)((long)tb1 & ~3);
585 if (n1 == n && tb1 == tb)
586 break;
587 if (n1 == 2) {
588 ptb = &tb1->jmp_first;
589 } else {
590 ptb = &tb1->jmp_next[n1];
591 }
592 }
593 /* now we can suppress tb(n) from the list */
594 *ptb = tb->jmp_next[n];
595
596 tb->jmp_next[n] = NULL;
597 }
598 }
599
600 /* reset the jump entry 'n' of a TB so that it is not chained to
601 another TB */
602 static inline void tb_reset_jump(TranslationBlock *tb, int n)
603 {
604 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
605 }
606
607 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
608 {
609 CPUState *env;
610 PageDesc *p;
611 unsigned int h, n1;
612 target_phys_addr_t phys_pc;
613 TranslationBlock *tb1, *tb2;
614
615 /* remove the TB from the hash list */
616 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
617 h = tb_phys_hash_func(phys_pc);
618 tb_remove(&tb_phys_hash[h], tb,
619 offsetof(TranslationBlock, phys_hash_next));
620
621 /* remove the TB from the page list */
622 if (tb->page_addr[0] != page_addr) {
623 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
624 tb_page_remove(&p->first_tb, tb);
625 invalidate_page_bitmap(p);
626 }
627 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
628 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
629 tb_page_remove(&p->first_tb, tb);
630 invalidate_page_bitmap(p);
631 }
632
633 tb_invalidated_flag = 1;
634
635 /* remove the TB from the hash list */
636 h = tb_jmp_cache_hash_func(tb->pc);
637 for(env = first_cpu; env != NULL; env = env->next_cpu) {
638 if (env->tb_jmp_cache[h] == tb)
639 env->tb_jmp_cache[h] = NULL;
640 }
641
642 /* suppress this TB from the two jump lists */
643 tb_jmp_remove(tb, 0);
644 tb_jmp_remove(tb, 1);
645
646 /* suppress any remaining jumps to this TB */
647 tb1 = tb->jmp_first;
648 for(;;) {
649 n1 = (long)tb1 & 3;
650 if (n1 == 2)
651 break;
652 tb1 = (TranslationBlock *)((long)tb1 & ~3);
653 tb2 = tb1->jmp_next[n1];
654 tb_reset_jump(tb1, n1);
655 tb1->jmp_next[n1] = NULL;
656 tb1 = tb2;
657 }
658 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
659
660 tb_phys_invalidate_count++;
661 }
662
663 static inline void set_bits(uint8_t *tab, int start, int len)
664 {
665 int end, mask, end1;
666
667 end = start + len;
668 tab += start >> 3;
669 mask = 0xff << (start & 7);
670 if ((start & ~7) == (end & ~7)) {
671 if (start < end) {
672 mask &= ~(0xff << (end & 7));
673 *tab |= mask;
674 }
675 } else {
676 *tab++ |= mask;
677 start = (start + 8) & ~7;
678 end1 = end & ~7;
679 while (start < end1) {
680 *tab++ = 0xff;
681 start += 8;
682 }
683 if (start < end) {
684 mask = ~(0xff << (end & 7));
685 *tab |= mask;
686 }
687 }
688 }
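/* Illustration (not part of the original code): set_bits(tab, 5, 7)
   marks bits 5..11, i.e. bits 5-7 of tab[0] through the first mask
   (0xff << 5, truncated to a byte on store) and bits 0-3 of tab[1]
   through the final partial mask ~(0xff << (12 & 7)).
   build_page_bitmap() below uses this to record which bytes of a page
   are covered by translated code. */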
689
690 static void build_page_bitmap(PageDesc *p)
691 {
692 int n, tb_start, tb_end;
693 TranslationBlock *tb;
694
695 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
696 if (!p->code_bitmap)
697 return;
698 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
699
700 tb = p->first_tb;
701 while (tb != NULL) {
702 n = (long)tb & 3;
703 tb = (TranslationBlock *)((long)tb & ~3);
704 /* NOTE: this is subtle as a TB may span two physical pages */
705 if (n == 0) {
706 /* NOTE: tb_end may be after the end of the page, but
707 it is not a problem */
708 tb_start = tb->pc & ~TARGET_PAGE_MASK;
709 tb_end = tb_start + tb->size;
710 if (tb_end > TARGET_PAGE_SIZE)
711 tb_end = TARGET_PAGE_SIZE;
712 } else {
713 tb_start = 0;
714 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
715 }
716 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
717 tb = tb->page_next[n];
718 }
719 }
720
721 #ifdef TARGET_HAS_PRECISE_SMC
722
723 static void tb_gen_code(CPUState *env,
724 target_ulong pc, target_ulong cs_base, int flags,
725 int cflags)
726 {
727 TranslationBlock *tb;
728 uint8_t *tc_ptr;
729 target_ulong phys_pc, phys_page2, virt_page2;
730 int code_gen_size;
731
732 phys_pc = get_phys_addr_code(env, pc);
733 tb = tb_alloc(pc);
734 if (!tb) {
735 /* flush must be done */
736 tb_flush(env);
737 /* cannot fail at this point */
738 tb = tb_alloc(pc);
739 }
740 tc_ptr = code_gen_ptr;
741 tb->tc_ptr = tc_ptr;
742 tb->cs_base = cs_base;
743 tb->flags = flags;
744 tb->cflags = cflags;
745 cpu_gen_code(env, tb, &code_gen_size);
746 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
747
748 /* check next page if needed */
749 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
750 phys_page2 = -1;
751 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
752 phys_page2 = get_phys_addr_code(env, virt_page2);
753 }
754 tb_link_phys(tb, phys_pc, phys_page2);
755 }
756 #endif
757
758 /* invalidate all TBs which intersect with the target physical page
759 starting in range [start;end[. NOTE: start and end must refer to
760 the same physical page. 'is_cpu_write_access' should be true if called
761 from a real cpu write access: the virtual CPU will exit the current
762 TB if code is modified inside this TB. */
763 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
764 int is_cpu_write_access)
765 {
766 int n, current_tb_modified, current_tb_not_found, current_flags;
767 CPUState *env = cpu_single_env;
768 PageDesc *p;
769 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
770 target_ulong tb_start, tb_end;
771 target_ulong current_pc, current_cs_base;
772
773 p = page_find(start >> TARGET_PAGE_BITS);
774 if (!p)
775 return;
776 if (!p->code_bitmap &&
777 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
778 is_cpu_write_access) {
779 /* build code bitmap */
780 build_page_bitmap(p);
781 }
782
783 /* we remove all the TBs in the range [start, end[ */
784 /* XXX: see if in some cases it could be faster to invalidate all the code */
785 current_tb_not_found = is_cpu_write_access;
786 current_tb_modified = 0;
787 current_tb = NULL; /* avoid warning */
788 current_pc = 0; /* avoid warning */
789 current_cs_base = 0; /* avoid warning */
790 current_flags = 0; /* avoid warning */
791 tb = p->first_tb;
792 while (tb != NULL) {
793 n = (long)tb & 3;
794 tb = (TranslationBlock *)((long)tb & ~3);
795 tb_next = tb->page_next[n];
796 /* NOTE: this is subtle as a TB may span two physical pages */
797 if (n == 0) {
798 /* NOTE: tb_end may be after the end of the page, but
799 it is not a problem */
800 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
801 tb_end = tb_start + tb->size;
802 } else {
803 tb_start = tb->page_addr[1];
804 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
805 }
806 if (!(tb_end <= start || tb_start >= end)) {
807 #ifdef TARGET_HAS_PRECISE_SMC
808 if (current_tb_not_found) {
809 current_tb_not_found = 0;
810 current_tb = NULL;
811 if (env->mem_write_pc) {
812 /* now we have a real cpu fault */
813 current_tb = tb_find_pc(env->mem_write_pc);
814 }
815 }
816 if (current_tb == tb &&
817 !(current_tb->cflags & CF_SINGLE_INSN)) {
818 /* If we are modifying the current TB, we must stop
819 its execution. We could be more precise by checking
820 that the modification is after the current PC, but it
821 would require a specialized function to partially
822 restore the CPU state */
823
824 current_tb_modified = 1;
825 cpu_restore_state(current_tb, env,
826 env->mem_write_pc, NULL);
827 #if defined(TARGET_I386)
828 current_flags = env->hflags;
829 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
830 current_cs_base = (target_ulong)env->segs[R_CS].base;
831 current_pc = current_cs_base + env->eip;
832 #else
833 #error unsupported CPU
834 #endif
835 }
836 #endif /* TARGET_HAS_PRECISE_SMC */
837 /* we need to do that to handle the case where a signal
838 occurs while doing tb_phys_invalidate() */
839 saved_tb = NULL;
840 if (env) {
841 saved_tb = env->current_tb;
842 env->current_tb = NULL;
843 }
844 tb_phys_invalidate(tb, -1);
845 if (env) {
846 env->current_tb = saved_tb;
847 if (env->interrupt_request && env->current_tb)
848 cpu_interrupt(env, env->interrupt_request);
849 }
850 }
851 tb = tb_next;
852 }
853 #if !defined(CONFIG_USER_ONLY)
854 /* if no code remaining, no need to continue to use slow writes */
855 if (!p->first_tb) {
856 invalidate_page_bitmap(p);
857 if (is_cpu_write_access) {
858 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
859 }
860 }
861 #endif
862 #ifdef TARGET_HAS_PRECISE_SMC
863 if (current_tb_modified) {
864 /* we generate a block containing just the instruction
865 modifying the memory. It will ensure that it cannot modify
866 itself */
867 env->current_tb = NULL;
868 tb_gen_code(env, current_pc, current_cs_base, current_flags,
869 CF_SINGLE_INSN);
870 cpu_resume_from_signal(env, NULL);
871 }
872 #endif
873 }
874
875 /* len must be <= 8 and start must be a multiple of len */
876 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
877 {
878 PageDesc *p;
879 int offset, b;
880 #if 0
881 if (1) {
882 if (loglevel) {
883 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
884 cpu_single_env->mem_write_vaddr, len,
885 cpu_single_env->eip,
886 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
887 }
888 }
889 #endif
890 p = page_find(start >> TARGET_PAGE_BITS);
891 if (!p)
892 return;
893 if (p->code_bitmap) {
894 offset = start & ~TARGET_PAGE_MASK;
895 b = p->code_bitmap[offset >> 3] >> (offset & 7);
896 if (b & ((1 << len) - 1))
897 goto do_invalidate;
898 } else {
899 do_invalidate:
900 tb_invalidate_phys_page_range(start, start + len, 1);
901 }
902 }
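/* Fast path summary: once a page has taken SMC_BITMAP_USE_THRESHOLD
   code writes, tb_invalidate_phys_page_range() builds p->code_bitmap
   with one bit per byte of the page that is covered by translated code.
   The check above then reduces a write of 'len' bytes at 'start' to a
   shift and a mask: b = bitmap[offset >> 3] >> (offset & 7), and only
   when b & ((1 << len) - 1) is non zero can the write touch code, so
   only then is the slow invalidation path taken. */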
903
904 #if !defined(CONFIG_SOFTMMU)
905 static void tb_invalidate_phys_page(target_phys_addr_t addr,
906 unsigned long pc, void *puc)
907 {
908 int n, current_flags, current_tb_modified;
909 target_ulong current_pc, current_cs_base;
910 PageDesc *p;
911 TranslationBlock *tb, *current_tb;
912 #ifdef TARGET_HAS_PRECISE_SMC
913 CPUState *env = cpu_single_env;
914 #endif
915
916 addr &= TARGET_PAGE_MASK;
917 p = page_find(addr >> TARGET_PAGE_BITS);
918 if (!p)
919 return;
920 tb = p->first_tb;
921 current_tb_modified = 0;
922 current_tb = NULL;
923 current_pc = 0; /* avoid warning */
924 current_cs_base = 0; /* avoid warning */
925 current_flags = 0; /* avoid warning */
926 #ifdef TARGET_HAS_PRECISE_SMC
927 if (tb && pc != 0) {
928 current_tb = tb_find_pc(pc);
929 }
930 #endif
931 while (tb != NULL) {
932 n = (long)tb & 3;
933 tb = (TranslationBlock *)((long)tb & ~3);
934 #ifdef TARGET_HAS_PRECISE_SMC
935 if (current_tb == tb &&
936 !(current_tb->cflags & CF_SINGLE_INSN)) {
937 /* If we are modifying the current TB, we must stop
938 its execution. We could be more precise by checking
939 that the modification is after the current PC, but it
940 would require a specialized function to partially
941 restore the CPU state */
942
943 current_tb_modified = 1;
944 cpu_restore_state(current_tb, env, pc, puc);
945 #if defined(TARGET_I386)
946 current_flags = env->hflags;
947 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
948 current_cs_base = (target_ulong)env->segs[R_CS].base;
949 current_pc = current_cs_base + env->eip;
950 #else
951 #error unsupported CPU
952 #endif
953 }
954 #endif /* TARGET_HAS_PRECISE_SMC */
955 tb_phys_invalidate(tb, addr);
956 tb = tb->page_next[n];
957 }
958 p->first_tb = NULL;
959 #ifdef TARGET_HAS_PRECISE_SMC
960 if (current_tb_modified) {
961 /* we generate a block containing just the instruction
962 modifying the memory. It will ensure that it cannot modify
963 itself */
964 env->current_tb = NULL;
965 tb_gen_code(env, current_pc, current_cs_base, current_flags,
966 CF_SINGLE_INSN);
967 cpu_resume_from_signal(env, puc);
968 }
969 #endif
970 }
971 #endif
972
973 /* add the tb in the target page and protect it if necessary */
974 static inline void tb_alloc_page(TranslationBlock *tb,
975 unsigned int n, target_ulong page_addr)
976 {
977 PageDesc *p;
978 TranslationBlock *last_first_tb;
979
980 tb->page_addr[n] = page_addr;
981 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
982 tb->page_next[n] = p->first_tb;
983 last_first_tb = p->first_tb;
984 p->first_tb = (TranslationBlock *)((long)tb | n);
985 invalidate_page_bitmap(p);
986
987 #if defined(TARGET_HAS_SMC) || 1
988
989 #if defined(CONFIG_USER_ONLY)
990 if (p->flags & PAGE_WRITE) {
991 target_ulong addr;
992 PageDesc *p2;
993 int prot;
994
995 /* force the host page as non writable (writes will have a
996 page fault + mprotect overhead) */
997 page_addr &= qemu_host_page_mask;
998 prot = 0;
999 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1000 addr += TARGET_PAGE_SIZE) {
1001
1002 p2 = page_find (addr >> TARGET_PAGE_BITS);
1003 if (!p2)
1004 continue;
1005 prot |= p2->flags;
1006 p2->flags &= ~PAGE_WRITE;
1007 page_get_flags(addr);
1008 }
1009 mprotect(g2h(page_addr), qemu_host_page_size,
1010 (prot & PAGE_BITS) & ~PAGE_WRITE);
1011 #ifdef DEBUG_TB_INVALIDATE
1012 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1013 page_addr);
1014 #endif
1015 }
1016 #else
1017 /* if some code is already present, then the pages are already
1018 protected. So we handle the case where only the first TB is
1019 allocated in a physical page */
1020 if (!last_first_tb) {
1021 tlb_protect_code(page_addr);
1022 }
1023 #endif
1024
1025 #endif /* TARGET_HAS_SMC */
1026 }
1027
1028 /* Allocate a new translation block. Flush the translation buffer if
1029 too many translation blocks or too much generated code. */
1030 TranslationBlock *tb_alloc(target_ulong pc)
1031 {
1032 TranslationBlock *tb;
1033
1034 if (nb_tbs >= code_gen_max_blocks ||
1035 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1036 return NULL;
1037 tb = &tbs[nb_tbs++];
1038 tb->pc = pc;
1039 tb->cflags = 0;
1040 return tb;
1041 }
1042
1043 /* add a new TB and link it to the physical page tables. phys_page2 is
1044 (-1) to indicate that only one page contains the TB. */
1045 void tb_link_phys(TranslationBlock *tb,
1046 target_ulong phys_pc, target_ulong phys_page2)
1047 {
1048 unsigned int h;
1049 TranslationBlock **ptb;
1050
1051 /* add in the physical hash table */
1052 h = tb_phys_hash_func(phys_pc);
1053 ptb = &tb_phys_hash[h];
1054 tb->phys_hash_next = *ptb;
1055 *ptb = tb;
1056
1057 /* add in the page list */
1058 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1059 if (phys_page2 != -1)
1060 tb_alloc_page(tb, 1, phys_page2);
1061 else
1062 tb->page_addr[1] = -1;
1063
1064 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1065 tb->jmp_next[0] = NULL;
1066 tb->jmp_next[1] = NULL;
1067
1068 /* init original jump addresses */
1069 if (tb->tb_next_offset[0] != 0xffff)
1070 tb_reset_jump(tb, 0);
1071 if (tb->tb_next_offset[1] != 0xffff)
1072 tb_reset_jump(tb, 1);
1073
1074 #ifdef DEBUG_TB_CHECK
1075 tb_page_check();
1076 #endif
1077 }
1078
1079 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1080 tb[1].tc_ptr. Return NULL if not found */
1081 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1082 {
1083 int m_min, m_max, m;
1084 unsigned long v;
1085 TranslationBlock *tb;
1086
1087 if (nb_tbs <= 0)
1088 return NULL;
1089 if (tc_ptr < (unsigned long)code_gen_buffer ||
1090 tc_ptr >= (unsigned long)code_gen_ptr)
1091 return NULL;
1092 /* binary search (cf Knuth) */
1093 m_min = 0;
1094 m_max = nb_tbs - 1;
1095 while (m_min <= m_max) {
1096 m = (m_min + m_max) >> 1;
1097 tb = &tbs[m];
1098 v = (unsigned long)tb->tc_ptr;
1099 if (v == tc_ptr)
1100 return tb;
1101 else if (tc_ptr < v) {
1102 m_max = m - 1;
1103 } else {
1104 m_min = m + 1;
1105 }
1106 }
1107 return &tbs[m_max];
1108 }
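/* Why the binary search above works: TBs are handed out from tbs[] in
   allocation order and their tc_ptr values come from a code_gen_ptr
   that only moves forward between flushes, so tbs[0..nb_tbs-1] is
   sorted by tc_ptr.  When the loop falls through, tbs[m_max] is the
   last block whose generated code starts at or before tc_ptr, i.e. the
   block containing that host PC. */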
1109
1110 static void tb_reset_jump_recursive(TranslationBlock *tb);
1111
1112 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1113 {
1114 TranslationBlock *tb1, *tb_next, **ptb;
1115 unsigned int n1;
1116
1117 tb1 = tb->jmp_next[n];
1118 if (tb1 != NULL) {
1119 /* find head of list */
1120 for(;;) {
1121 n1 = (long)tb1 & 3;
1122 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1123 if (n1 == 2)
1124 break;
1125 tb1 = tb1->jmp_next[n1];
1126 }
1127         /* we are now sure that tb jumps to tb1 */
1128 tb_next = tb1;
1129
1130 /* remove tb from the jmp_first list */
1131 ptb = &tb_next->jmp_first;
1132 for(;;) {
1133 tb1 = *ptb;
1134 n1 = (long)tb1 & 3;
1135 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1136 if (n1 == n && tb1 == tb)
1137 break;
1138 ptb = &tb1->jmp_next[n1];
1139 }
1140 *ptb = tb->jmp_next[n];
1141 tb->jmp_next[n] = NULL;
1142
1143 /* suppress the jump to next tb in generated code */
1144 tb_reset_jump(tb, n);
1145
1146 /* suppress jumps in the tb on which we could have jumped */
1147 tb_reset_jump_recursive(tb_next);
1148 }
1149 }
1150
1151 static void tb_reset_jump_recursive(TranslationBlock *tb)
1152 {
1153 tb_reset_jump_recursive2(tb, 0);
1154 tb_reset_jump_recursive2(tb, 1);
1155 }
1156
1157 #if defined(TARGET_HAS_ICE)
1158 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1159 {
1160 target_phys_addr_t addr;
1161 target_ulong pd;
1162 ram_addr_t ram_addr;
1163 PhysPageDesc *p;
1164
1165 addr = cpu_get_phys_page_debug(env, pc);
1166 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1167 if (!p) {
1168 pd = IO_MEM_UNASSIGNED;
1169 } else {
1170 pd = p->phys_offset;
1171 }
1172 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1173 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1174 }
1175 #endif
1176
1177 /* Add a watchpoint. */
1178 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1179 {
1180 int i;
1181
1182 for (i = 0; i < env->nb_watchpoints; i++) {
1183 if (addr == env->watchpoint[i].vaddr)
1184 return 0;
1185 }
1186 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1187 return -1;
1188
1189 i = env->nb_watchpoints++;
1190 env->watchpoint[i].vaddr = addr;
1191 tlb_flush_page(env, addr);
1192 /* FIXME: This flush is needed because of the hack to make memory ops
1193 terminate the TB. It can be removed once the proper IO trap and
1194 re-execute bits are in. */
1195 tb_flush(env);
1196 return i;
1197 }
1198
1199 /* Remove a watchpoint. */
1200 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1201 {
1202 int i;
1203
1204 for (i = 0; i < env->nb_watchpoints; i++) {
1205 if (addr == env->watchpoint[i].vaddr) {
1206 env->nb_watchpoints--;
1207 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1208 tlb_flush_page(env, addr);
1209 return 0;
1210 }
1211 }
1212 return -1;
1213 }
1214
1215 /* Remove all watchpoints. */
1216 void cpu_watchpoint_remove_all(CPUState *env) {
1217 int i;
1218
1219 for (i = 0; i < env->nb_watchpoints; i++) {
1220 tlb_flush_page(env, env->watchpoint[i].vaddr);
1221 }
1222 env->nb_watchpoints = 0;
1223 }
1224
1225 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1226 breakpoint is reached */
1227 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1228 {
1229 #if defined(TARGET_HAS_ICE)
1230 int i;
1231
1232 for(i = 0; i < env->nb_breakpoints; i++) {
1233 if (env->breakpoints[i] == pc)
1234 return 0;
1235 }
1236
1237 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1238 return -1;
1239 env->breakpoints[env->nb_breakpoints++] = pc;
1240
1241 breakpoint_invalidate(env, pc);
1242 return 0;
1243 #else
1244 return -1;
1245 #endif
1246 }
1247
1248 /* remove all breakpoints */
1249 void cpu_breakpoint_remove_all(CPUState *env) {
1250 #if defined(TARGET_HAS_ICE)
1251 int i;
1252 for(i = 0; i < env->nb_breakpoints; i++) {
1253 breakpoint_invalidate(env, env->breakpoints[i]);
1254 }
1255 env->nb_breakpoints = 0;
1256 #endif
1257 }
1258
1259 /* remove a breakpoint */
1260 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1261 {
1262 #if defined(TARGET_HAS_ICE)
1263 int i;
1264 for(i = 0; i < env->nb_breakpoints; i++) {
1265 if (env->breakpoints[i] == pc)
1266 goto found;
1267 }
1268 return -1;
1269 found:
1270 env->nb_breakpoints--;
1271 if (i < env->nb_breakpoints)
1272 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1273
1274 breakpoint_invalidate(env, pc);
1275 return 0;
1276 #else
1277 return -1;
1278 #endif
1279 }
1280
1281 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1282 CPU loop after each instruction */
1283 void cpu_single_step(CPUState *env, int enabled)
1284 {
1285 #if defined(TARGET_HAS_ICE)
1286 if (env->singlestep_enabled != enabled) {
1287 env->singlestep_enabled = enabled;
1288         /* must flush all the translated code to avoid inconsistencies */
1289 /* XXX: only flush what is necessary */
1290 tb_flush(env);
1291 }
1292 #endif
1293 }
1294
1295 /* enable or disable low levels log */
1296 void cpu_set_log(int log_flags)
1297 {
1298 loglevel = log_flags;
1299 if (loglevel && !logfile) {
1300 logfile = fopen(logfilename, log_append ? "a" : "w");
1301 if (!logfile) {
1302 perror(logfilename);
1303 _exit(1);
1304 }
1305 #if !defined(CONFIG_SOFTMMU)
1306 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1307 {
1308 static uint8_t logfile_buf[4096];
1309 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1310 }
1311 #else
1312 setvbuf(logfile, NULL, _IOLBF, 0);
1313 #endif
1314 log_append = 1;
1315 }
1316 if (!loglevel && logfile) {
1317 fclose(logfile);
1318 logfile = NULL;
1319 }
1320 }
1321
1322 void cpu_set_log_filename(const char *filename)
1323 {
1324 logfilename = strdup(filename);
1325 if (logfile) {
1326 fclose(logfile);
1327 logfile = NULL;
1328 }
1329 cpu_set_log(loglevel);
1330 }
1331
1332 /* mask must never be zero, except for A20 change call */
1333 void cpu_interrupt(CPUState *env, int mask)
1334 {
1335 TranslationBlock *tb;
1336 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1337
1338 env->interrupt_request |= mask;
1339 /* if the cpu is currently executing code, we must unlink it and
1340 all the potentially executing TB */
1341 tb = env->current_tb;
1342 if (tb && !testandset(&interrupt_lock)) {
1343 env->current_tb = NULL;
1344 tb_reset_jump_recursive(tb);
1345 resetlock(&interrupt_lock);
1346 }
1347 }
1348
1349 void cpu_reset_interrupt(CPUState *env, int mask)
1350 {
1351 env->interrupt_request &= ~mask;
1352 }
1353
1354 CPULogItem cpu_log_items[] = {
1355 { CPU_LOG_TB_OUT_ASM, "out_asm",
1356 "show generated host assembly code for each compiled TB" },
1357 { CPU_LOG_TB_IN_ASM, "in_asm",
1358 "show target assembly code for each compiled TB" },
1359 { CPU_LOG_TB_OP, "op",
1360 "show micro ops for each compiled TB" },
1361 { CPU_LOG_TB_OP_OPT, "op_opt",
1362 "show micro ops "
1363 #ifdef TARGET_I386
1364 "before eflags optimization and "
1365 #endif
1366 "after liveness analysis" },
1367 { CPU_LOG_INT, "int",
1368 "show interrupts/exceptions in short format" },
1369 { CPU_LOG_EXEC, "exec",
1370 "show trace before each executed TB (lots of logs)" },
1371 { CPU_LOG_TB_CPU, "cpu",
1372 "show CPU state before block translation" },
1373 #ifdef TARGET_I386
1374 { CPU_LOG_PCALL, "pcall",
1375 "show protected mode far calls/returns/exceptions" },
1376 #endif
1377 #ifdef DEBUG_IOPORT
1378 { CPU_LOG_IOPORT, "ioport",
1379 "show all i/o ports accesses" },
1380 #endif
1381 { 0, NULL, NULL },
1382 };
1383
1384 static int cmp1(const char *s1, int n, const char *s2)
1385 {
1386 if (strlen(s2) != n)
1387 return 0;
1388 return memcmp(s1, s2, n) == 0;
1389 }
1390
1391 /* takes a comma-separated list of log masks. Returns 0 on error. */
1392 int cpu_str_to_log_mask(const char *str)
1393 {
1394 CPULogItem *item;
1395 int mask;
1396 const char *p, *p1;
1397
1398 p = str;
1399 mask = 0;
1400 for(;;) {
1401 p1 = strchr(p, ',');
1402 if (!p1)
1403 p1 = p + strlen(p);
1404 if(cmp1(p,p1-p,"all")) {
1405 for(item = cpu_log_items; item->mask != 0; item++) {
1406 mask |= item->mask;
1407 }
1408 } else {
1409 for(item = cpu_log_items; item->mask != 0; item++) {
1410 if (cmp1(p, p1 - p, item->name))
1411 goto found;
1412 }
1413 return 0;
1414 }
1415 found:
1416 mask |= item->mask;
1417 if (*p1 != ',')
1418 break;
1419 p = p1 + 1;
1420 }
1421 return mask;
1422 }
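/* Usage sketch (illustrative, not taken from this file): the -d option
   handling is assumed to do roughly
       mask = cpu_str_to_log_mask(optarg);
       if (!mask)
           ...print cpu_log_items[] and exit...
       cpu_set_log(mask);
   where "all" selects every entry of cpu_log_items[] and an unknown
   name makes the function return 0. */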
1423
1424 void cpu_abort(CPUState *env, const char *fmt, ...)
1425 {
1426 va_list ap;
1427 va_list ap2;
1428
1429 va_start(ap, fmt);
1430 va_copy(ap2, ap);
1431 fprintf(stderr, "qemu: fatal: ");
1432 vfprintf(stderr, fmt, ap);
1433 fprintf(stderr, "\n");
1434 #ifdef TARGET_I386
1435 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1436 #else
1437 cpu_dump_state(env, stderr, fprintf, 0);
1438 #endif
1439 if (logfile) {
1440 fprintf(logfile, "qemu: fatal: ");
1441 vfprintf(logfile, fmt, ap2);
1442 fprintf(logfile, "\n");
1443 #ifdef TARGET_I386
1444 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1445 #else
1446 cpu_dump_state(env, logfile, fprintf, 0);
1447 #endif
1448 fflush(logfile);
1449 fclose(logfile);
1450 }
1451 va_end(ap2);
1452 va_end(ap);
1453 abort();
1454 }
1455
1456 CPUState *cpu_copy(CPUState *env)
1457 {
1458 CPUState *new_env = cpu_init(env->cpu_model_str);
1459 /* preserve chaining and index */
1460 CPUState *next_cpu = new_env->next_cpu;
1461 int cpu_index = new_env->cpu_index;
1462 memcpy(new_env, env, sizeof(CPUState));
1463 new_env->next_cpu = next_cpu;
1464 new_env->cpu_index = cpu_index;
1465 return new_env;
1466 }
1467
1468 #if !defined(CONFIG_USER_ONLY)
1469
1470 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1471 {
1472 unsigned int i;
1473
1474 /* Discard jump cache entries for any tb which might potentially
1475 overlap the flushed page. */
1476 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1477 memset (&env->tb_jmp_cache[i], 0,
1478 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1479
1480 i = tb_jmp_cache_hash_page(addr);
1481 memset (&env->tb_jmp_cache[i], 0,
1482 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1483 }
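/* Two hash ranges are cleared above because tb_jmp_cache is indexed by
   a hash of the TB's starting pc: a block that starts near the end of
   the preceding page can still extend into the flushed page, hence the
   extra pass over tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE). */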
1484
1485 /* NOTE: if flush_global is true, also flush global entries (not
1486 implemented yet) */
1487 void tlb_flush(CPUState *env, int flush_global)
1488 {
1489 int i;
1490
1491 #if defined(DEBUG_TLB)
1492 printf("tlb_flush:\n");
1493 #endif
1494 /* must reset current TB so that interrupts cannot modify the
1495 links while we are modifying them */
1496 env->current_tb = NULL;
1497
1498 for(i = 0; i < CPU_TLB_SIZE; i++) {
1499 env->tlb_table[0][i].addr_read = -1;
1500 env->tlb_table[0][i].addr_write = -1;
1501 env->tlb_table[0][i].addr_code = -1;
1502 env->tlb_table[1][i].addr_read = -1;
1503 env->tlb_table[1][i].addr_write = -1;
1504 env->tlb_table[1][i].addr_code = -1;
1505 #if (NB_MMU_MODES >= 3)
1506 env->tlb_table[2][i].addr_read = -1;
1507 env->tlb_table[2][i].addr_write = -1;
1508 env->tlb_table[2][i].addr_code = -1;
1509 #if (NB_MMU_MODES == 4)
1510 env->tlb_table[3][i].addr_read = -1;
1511 env->tlb_table[3][i].addr_write = -1;
1512 env->tlb_table[3][i].addr_code = -1;
1513 #endif
1514 #endif
1515 }
1516
1517 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1518
1519 #if !defined(CONFIG_SOFTMMU)
1520 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1521 #endif
1522 #ifdef USE_KQEMU
1523 if (env->kqemu_enabled) {
1524 kqemu_flush(env, flush_global);
1525 }
1526 #endif
1527 tlb_flush_count++;
1528 }
1529
1530 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1531 {
1532 if (addr == (tlb_entry->addr_read &
1533 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1534 addr == (tlb_entry->addr_write &
1535 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1536 addr == (tlb_entry->addr_code &
1537 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1538 tlb_entry->addr_read = -1;
1539 tlb_entry->addr_write = -1;
1540 tlb_entry->addr_code = -1;
1541 }
1542 }
1543
1544 void tlb_flush_page(CPUState *env, target_ulong addr)
1545 {
1546 int i;
1547
1548 #if defined(DEBUG_TLB)
1549 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1550 #endif
1551 /* must reset current TB so that interrupts cannot modify the
1552 links while we are modifying them */
1553 env->current_tb = NULL;
1554
1555 addr &= TARGET_PAGE_MASK;
1556 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1557 tlb_flush_entry(&env->tlb_table[0][i], addr);
1558 tlb_flush_entry(&env->tlb_table[1][i], addr);
1559 #if (NB_MMU_MODES >= 3)
1560 tlb_flush_entry(&env->tlb_table[2][i], addr);
1561 #if (NB_MMU_MODES == 4)
1562 tlb_flush_entry(&env->tlb_table[3][i], addr);
1563 #endif
1564 #endif
1565
1566 tlb_flush_jmp_cache(env, addr);
1567
1568 #if !defined(CONFIG_SOFTMMU)
1569 if (addr < MMAP_AREA_END)
1570 munmap((void *)addr, TARGET_PAGE_SIZE);
1571 #endif
1572 #ifdef USE_KQEMU
1573 if (env->kqemu_enabled) {
1574 kqemu_flush_page(env, addr);
1575 }
1576 #endif
1577 }
1578
1579 /* update the TLBs so that writes to code in the virtual page 'addr'
1580 can be detected */
1581 static void tlb_protect_code(ram_addr_t ram_addr)
1582 {
1583 cpu_physical_memory_reset_dirty(ram_addr,
1584 ram_addr + TARGET_PAGE_SIZE,
1585 CODE_DIRTY_FLAG);
1586 }
1587
1588 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1589    tested for self-modifying code */
1590 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1591 target_ulong vaddr)
1592 {
1593 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1594 }
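/* How the pieces fit together (summary): phys_ram_dirty keeps per page
   dirty flags.  tlb_protect_code() clears CODE_DIRTY_FLAG for a page
   that now holds a TB, which makes tlb_set_page_exec() install the
   write entry as IO_MEM_NOTDIRTY; guest stores then take the slow I/O
   path, whose not-dirty write handlers (later in this file) call
   tb_invalidate_phys_page_fast().  Once no TB remains on the page,
   tlb_unprotect_code_phys() sets the flag back and plain RAM writes
   resume. */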
1595
1596 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1597 unsigned long start, unsigned long length)
1598 {
1599 unsigned long addr;
1600 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1601 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1602 if ((addr - start) < length) {
1603 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1604 }
1605 }
1606 }
1607
1608 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1609 int dirty_flags)
1610 {
1611 CPUState *env;
1612 unsigned long length, start1;
1613 int i, mask, len;
1614 uint8_t *p;
1615
1616 start &= TARGET_PAGE_MASK;
1617 end = TARGET_PAGE_ALIGN(end);
1618
1619 length = end - start;
1620 if (length == 0)
1621 return;
1622 len = length >> TARGET_PAGE_BITS;
1623 #ifdef USE_KQEMU
1624 /* XXX: should not depend on cpu context */
1625 env = first_cpu;
1626 if (env->kqemu_enabled) {
1627 ram_addr_t addr;
1628 addr = start;
1629 for(i = 0; i < len; i++) {
1630 kqemu_set_notdirty(env, addr);
1631 addr += TARGET_PAGE_SIZE;
1632 }
1633 }
1634 #endif
1635 mask = ~dirty_flags;
1636 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1637 for(i = 0; i < len; i++)
1638 p[i] &= mask;
1639
1640 /* we modify the TLB cache so that the dirty bit will be set again
1641 when accessing the range */
1642 start1 = start + (unsigned long)phys_ram_base;
1643 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1644 for(i = 0; i < CPU_TLB_SIZE; i++)
1645 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1646 for(i = 0; i < CPU_TLB_SIZE; i++)
1647 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1648 #if (NB_MMU_MODES >= 3)
1649 for(i = 0; i < CPU_TLB_SIZE; i++)
1650 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1651 #if (NB_MMU_MODES == 4)
1652 for(i = 0; i < CPU_TLB_SIZE; i++)
1653 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1654 #endif
1655 #endif
1656 }
1657
1658 #if !defined(CONFIG_SOFTMMU)
1659 /* XXX: this is expensive */
1660 {
1661 VirtPageDesc *p;
1662 int j;
1663 target_ulong addr;
1664
1665 for(i = 0; i < L1_SIZE; i++) {
1666 p = l1_virt_map[i];
1667 if (p) {
1668 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1669 for(j = 0; j < L2_SIZE; j++) {
1670 if (p->valid_tag == virt_valid_tag &&
1671 p->phys_addr >= start && p->phys_addr < end &&
1672 (p->prot & PROT_WRITE)) {
1673 if (addr < MMAP_AREA_END) {
1674 mprotect((void *)addr, TARGET_PAGE_SIZE,
1675 p->prot & ~PROT_WRITE);
1676 }
1677 }
1678 addr += TARGET_PAGE_SIZE;
1679 p++;
1680 }
1681 }
1682 }
1683 }
1684 #endif
1685 }
1686
1687 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1688 {
1689 ram_addr_t ram_addr;
1690
1691 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1692 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1693 tlb_entry->addend - (unsigned long)phys_ram_base;
1694 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1695 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1696 }
1697 }
1698 }
1699
1700 /* update the TLB according to the current state of the dirty bits */
1701 void cpu_tlb_update_dirty(CPUState *env)
1702 {
1703 int i;
1704 for(i = 0; i < CPU_TLB_SIZE; i++)
1705 tlb_update_dirty(&env->tlb_table[0][i]);
1706 for(i = 0; i < CPU_TLB_SIZE; i++)
1707 tlb_update_dirty(&env->tlb_table[1][i]);
1708 #if (NB_MMU_MODES >= 3)
1709 for(i = 0; i < CPU_TLB_SIZE; i++)
1710 tlb_update_dirty(&env->tlb_table[2][i]);
1711 #if (NB_MMU_MODES == 4)
1712 for(i = 0; i < CPU_TLB_SIZE; i++)
1713 tlb_update_dirty(&env->tlb_table[3][i]);
1714 #endif
1715 #endif
1716 }
1717
1718 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1719 unsigned long start)
1720 {
1721 unsigned long addr;
1722 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1723 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1724 if (addr == start) {
1725 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1726 }
1727 }
1728 }
1729
1730 /* update the TLB corresponding to virtual page vaddr and phys addr
1731 addr so that it is no longer dirty */
1732 static inline void tlb_set_dirty(CPUState *env,
1733 unsigned long addr, target_ulong vaddr)
1734 {
1735 int i;
1736
1737 addr &= TARGET_PAGE_MASK;
1738 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1739 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1740 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1741 #if (NB_MMU_MODES >= 3)
1742 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1743 #if (NB_MMU_MODES == 4)
1744 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1745 #endif
1746 #endif
1747 }
1748
1749 /* add a new TLB entry. At most one entry for a given virtual address
1750 is permitted. Return 0 if OK or 2 if the page could not be mapped
1751 (can only happen in non SOFTMMU mode for I/O pages or pages
1752 conflicting with the host address space). */
1753 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1754 target_phys_addr_t paddr, int prot,
1755 int mmu_idx, int is_softmmu)
1756 {
1757 PhysPageDesc *p;
1758 unsigned long pd;
1759 unsigned int index;
1760 target_ulong address;
1761 target_phys_addr_t addend;
1762 int ret;
1763 CPUTLBEntry *te;
1764 int i;
1765
1766 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1767 if (!p) {
1768 pd = IO_MEM_UNASSIGNED;
1769 } else {
1770 pd = p->phys_offset;
1771 }
1772 #if defined(DEBUG_TLB)
1773 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1774 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1775 #endif
1776
1777 ret = 0;
1778 #if !defined(CONFIG_SOFTMMU)
1779 if (is_softmmu)
1780 #endif
1781 {
1782 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1783 /* IO memory case */
1784 address = vaddr | pd;
1785 addend = paddr;
1786 } else {
1787 /* standard memory */
1788 address = vaddr;
1789 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1790 }
1791
1792 /* Make accesses to pages with watchpoints go via the
1793 watchpoint trap routines. */
1794 for (i = 0; i < env->nb_watchpoints; i++) {
1795 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1796 if (address & ~TARGET_PAGE_MASK) {
1797 env->watchpoint[i].addend = 0;
1798 address = vaddr | io_mem_watch;
1799 } else {
1800 env->watchpoint[i].addend = pd - paddr +
1801 (unsigned long) phys_ram_base;
1802 /* TODO: Figure out how to make read watchpoints coexist
1803 with code. */
1804 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1805 }
1806 }
1807 }
1808
1809 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1810 addend -= vaddr;
1811 te = &env->tlb_table[mmu_idx][index];
1812 te->addend = addend;
1813 if (prot & PAGE_READ) {
1814 te->addr_read = address;
1815 } else {
1816 te->addr_read = -1;
1817 }
1818
1819 if (prot & PAGE_EXEC) {
1820 te->addr_code = address;
1821 } else {
1822 te->addr_code = -1;
1823 }
1824 if (prot & PAGE_WRITE) {
1825 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1826 (pd & IO_MEM_ROMD)) {
1827 /* write access calls the I/O callback */
1828 te->addr_write = vaddr |
1829 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1830 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1831 !cpu_physical_memory_is_dirty(pd)) {
1832 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1833 } else {
1834 te->addr_write = address;
1835 }
1836 } else {
1837 te->addr_write = -1;
1838 }
1839 }
1840 #if !defined(CONFIG_SOFTMMU)
1841 else {
1842 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1843 /* IO access: no mapping is done as it will be handled by the
1844 soft MMU */
1845 if (!(env->hflags & HF_SOFTMMU_MASK))
1846 ret = 2;
1847 } else {
1848 void *map_addr;
1849
1850 if (vaddr >= MMAP_AREA_END) {
1851 ret = 2;
1852 } else {
1853 if (prot & PROT_WRITE) {
1854 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1855 #if defined(TARGET_HAS_SMC) || 1
1856 first_tb ||
1857 #endif
1858 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1859 !cpu_physical_memory_is_dirty(pd))) {
1860                     /* ROM: behave as if code were inside */
1861 /* if code is present, we only map as read only and save the
1862 original mapping */
1863 VirtPageDesc *vp;
1864
1865 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1866 vp->phys_addr = pd;
1867 vp->prot = prot;
1868 vp->valid_tag = virt_valid_tag;
1869 prot &= ~PAGE_WRITE;
1870 }
1871 }
1872 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1873 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1874 if (map_addr == MAP_FAILED) {
1875 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1876 paddr, vaddr);
1877 }
1878 }
1879 }
1880 }
1881 #endif
1882 return ret;
1883 }
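/* TLB entry layout used above: addr_read/addr_write/addr_code keep the
   guest virtual page in their TARGET_PAGE_MASK bits and an io_index or
   flag (IO_MEM_NOTDIRTY, io_mem_watch, ...) in the low bits, with -1
   meaning the access kind is not allowed.  'addend' is chosen so that
   host_address = guest_vaddr + addend for RAM pages, which is why it is
   computed from phys_ram_base + (pd & TARGET_PAGE_MASK) and then has
   vaddr subtracted. */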
1884
1885 /* called from signal handler: invalidate the code and unprotect the
1886    page. Return TRUE if the fault was successfully handled. */
1887 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1888 {
1889 #if !defined(CONFIG_SOFTMMU)
1890 VirtPageDesc *vp;
1891
1892 #if defined(DEBUG_TLB)
1893 printf("page_unprotect: addr=0x%08x\n", addr);
1894 #endif
1895 addr &= TARGET_PAGE_MASK;
1896
1897 /* if it is not mapped, no need to worry here */
1898 if (addr >= MMAP_AREA_END)
1899 return 0;
1900 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1901 if (!vp)
1902 return 0;
1903     /* NOTE: in this case, valid_tag is _not_ tested as it
1904 validates only the code TLB */
1905 if (vp->valid_tag != virt_valid_tag)
1906 return 0;
1907 if (!(vp->prot & PAGE_WRITE))
1908 return 0;
1909 #if defined(DEBUG_TLB)
1910 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1911 addr, vp->phys_addr, vp->prot);
1912 #endif
1913 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1914 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1915 (unsigned long)addr, vp->prot);
1916 /* set the dirty bit */
1917 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1918 /* flush the code inside */
1919 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1920 return 1;
1921 #else
1922 return 0;
1923 #endif
1924 }
1925
1926 #else
1927
1928 void tlb_flush(CPUState *env, int flush_global)
1929 {
1930 }
1931
1932 void tlb_flush_page(CPUState *env, target_ulong addr)
1933 {
1934 }
1935
1936 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1937 target_phys_addr_t paddr, int prot,
1938 int mmu_idx, int is_softmmu)
1939 {
1940 return 0;
1941 }
1942
1943 /* dump memory mappings */
1944 void page_dump(FILE *f)
1945 {
1946 unsigned long start, end;
1947 int i, j, prot, prot1;
1948 PageDesc *p;
1949
1950 fprintf(f, "%-8s %-8s %-8s %s\n",
1951 "start", "end", "size", "prot");
1952 start = -1;
1953 end = -1;
1954 prot = 0;
1955 for(i = 0; i <= L1_SIZE; i++) {
1956 if (i < L1_SIZE)
1957 p = l1_map[i];
1958 else
1959 p = NULL;
1960 for(j = 0;j < L2_SIZE; j++) {
1961 if (!p)
1962 prot1 = 0;
1963 else
1964 prot1 = p[j].flags;
1965 if (prot1 != prot) {
1966 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1967 if (start != -1) {
1968 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1969 start, end, end - start,
1970 prot & PAGE_READ ? 'r' : '-',
1971 prot & PAGE_WRITE ? 'w' : '-',
1972 prot & PAGE_EXEC ? 'x' : '-');
1973 }
1974 if (prot1 != 0)
1975 start = end;
1976 else
1977 start = -1;
1978 prot = prot1;
1979 }
1980 if (!p)
1981 break;
1982 }
1983 }
1984 }
1985
1986 int page_get_flags(target_ulong address)
1987 {
1988 PageDesc *p;
1989
1990 p = page_find(address >> TARGET_PAGE_BITS);
1991 if (!p)
1992 return 0;
1993 return p->flags;
1994 }
1995
1996 /* modify the flags of a page and invalidate the code if
1997 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1998 depending on PAGE_WRITE */
1999 void page_set_flags(target_ulong start, target_ulong end, int flags)
2000 {
2001 PageDesc *p;
2002 target_ulong addr;
2003
2004 start = start & TARGET_PAGE_MASK;
2005 end = TARGET_PAGE_ALIGN(end);
2006 if (flags & PAGE_WRITE)
2007 flags |= PAGE_WRITE_ORG;
2008 spin_lock(&tb_lock);
2009 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2010 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2011 /* if the page becomes writable, invalidate any translated code
2012 inside it */
2013 if (!(p->flags & PAGE_WRITE) &&
2014 (flags & PAGE_WRITE) &&
2015 p->first_tb) {
2016 tb_invalidate_phys_page(addr, 0, NULL);
2017 }
2018 p->flags = flags;
2019 }
2020 spin_unlock(&tb_lock);
2021 }
2022
2023 int page_check_range(target_ulong start, target_ulong len, int flags)
2024 {
2025 PageDesc *p;
2026 target_ulong end;
2027 target_ulong addr;
2028
2029 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2030 start = start & TARGET_PAGE_MASK;
2031
2032 if( end < start )
2033 /* we've wrapped around */
2034 return -1;
2035 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2036 p = page_find(addr >> TARGET_PAGE_BITS);
2037 if( !p )
2038 return -1;
2039 if( !(p->flags & PAGE_VALID) )
2040 return -1;
2041
2042 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2043 return -1;
2044 if (flags & PAGE_WRITE) {
2045 if (!(p->flags & PAGE_WRITE_ORG))
2046 return -1;
2047 /* unprotect the page if it was put read-only because it
2048 contains translated code */
2049 if (!(p->flags & PAGE_WRITE)) {
2050 if (!page_unprotect(addr, 0, NULL))
2051 return -1;
2052 }
2053 return 0;
2054 }
2055 }
2056 return 0;
2057 }
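
/* Editor's note: an illustrative sketch (not part of the original source)
   of how the user-mode page flag API above is typically used.  The guest
   address and length are made up; the PAGE_* constants come from
   cpu-all.h. */
#if 0
static void example_track_guest_mapping(target_ulong guest_start,
                                        target_ulong guest_len)
{
    /* record a freshly mmap'ed guest range as valid and fully accessible */
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);

    /* before emulating a 4-byte store, check that the range is writable;
       page_check_range() also re-unprotects pages that were made read-only
       because they contain translated code */
    if (page_check_range(guest_start, 4, PAGE_WRITE) < 0) {
        /* deliver a SIGSEGV-style fault to the guest here */
    }
}
#endif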
2058
2059 /* called from signal handler: invalidate the code and unprotect the
2060 page. Return TRUE if the fault was successfully handled. */
2061 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2062 {
2063 unsigned int page_index, prot, pindex;
2064 PageDesc *p, *p1;
2065 target_ulong host_start, host_end, addr;
2066
2067 host_start = address & qemu_host_page_mask;
2068 page_index = host_start >> TARGET_PAGE_BITS;
2069 p1 = page_find(page_index);
2070 if (!p1)
2071 return 0;
2072 host_end = host_start + qemu_host_page_size;
2073 p = p1;
2074 prot = 0;
2075 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2076 prot |= p->flags;
2077 p++;
2078 }
2079 /* if the page was really writable, then we change its
2080 protection back to writable */
2081 if (prot & PAGE_WRITE_ORG) {
2082 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2083 if (!(p1[pindex].flags & PAGE_WRITE)) {
2084 mprotect((void *)g2h(host_start), qemu_host_page_size,
2085 (prot & PAGE_BITS) | PAGE_WRITE);
2086 p1[pindex].flags |= PAGE_WRITE;
2087 /* and since the content will be modified, we must invalidate
2088 the corresponding translated code. */
2089 tb_invalidate_phys_page(address, pc, puc);
2090 #ifdef DEBUG_TB_CHECK
2091 tb_invalidate_check(address);
2092 #endif
2093 return 1;
2094 }
2095 }
2096 return 0;
2097 }
2098
2099 static inline void tlb_set_dirty(CPUState *env,
2100 unsigned long addr, target_ulong vaddr)
2101 {
2102 }
2103 #endif /* defined(CONFIG_USER_ONLY) */
2104
2105 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2106 ram_addr_t memory);
2107 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2108 ram_addr_t orig_memory);
2109 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2110 need_subpage) \
2111 do { \
2112 if (addr > start_addr) \
2113 start_addr2 = 0; \
2114 else { \
2115 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2116 if (start_addr2 > 0) \
2117 need_subpage = 1; \
2118 } \
2119 \
2120 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2121 end_addr2 = TARGET_PAGE_SIZE - 1; \
2122 else { \
2123 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2124 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2125 need_subpage = 1; \
2126 } \
2127 } while (0)
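
/* Editor's note: a worked example for the macro above, assuming 4 KB
   target pages (TARGET_PAGE_SIZE = 0x1000).  Registering a region with
   start_addr = 0x1000 and orig_size = 0x800, evaluated at addr = 0x1000,
   gives start_addr2 = 0x000 and end_addr2 = 0x7ff; because end_addr2 is
   below TARGET_PAGE_SIZE - 1, need_subpage is set and only the first half
   of that page gets subpage dispatch. */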
2128
2129 /* register physical memory. 'size' must be a multiple of the target
2130 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2131 io memory page */
2132 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2133 ram_addr_t size,
2134 ram_addr_t phys_offset)
2135 {
2136 target_phys_addr_t addr, end_addr;
2137 PhysPageDesc *p;
2138 CPUState *env;
2139 ram_addr_t orig_size = size;
2140 void *subpage;
2141
2142 #ifdef USE_KQEMU
2143 /* XXX: should not depend on cpu context */
2144 env = first_cpu;
2145 if (env->kqemu_enabled) {
2146 kqemu_set_phys_mem(start_addr, size, phys_offset);
2147 }
2148 #endif
2149 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2150 end_addr = start_addr + (target_phys_addr_t)size;
2151 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2152 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2153 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2154 ram_addr_t orig_memory = p->phys_offset;
2155 target_phys_addr_t start_addr2, end_addr2;
2156 int need_subpage = 0;
2157
2158 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2159 need_subpage);
2160 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2161 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2162 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2163 &p->phys_offset, orig_memory);
2164 } else {
2165 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2166 >> IO_MEM_SHIFT];
2167 }
2168 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2169 } else {
2170 p->phys_offset = phys_offset;
2171 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2172 (phys_offset & IO_MEM_ROMD))
2173 phys_offset += TARGET_PAGE_SIZE;
2174 }
2175 } else {
2176 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2177 p->phys_offset = phys_offset;
2178 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2179 (phys_offset & IO_MEM_ROMD))
2180 phys_offset += TARGET_PAGE_SIZE;
2181 else {
2182 target_phys_addr_t start_addr2, end_addr2;
2183 int need_subpage = 0;
2184
2185 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2186 end_addr2, need_subpage);
2187
2188 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2189 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2190 &p->phys_offset, IO_MEM_UNASSIGNED);
2191 subpage_register(subpage, start_addr2, end_addr2,
2192 phys_offset);
2193 }
2194 }
2195 }
2196 }
2197
2198 /* since each CPU stores ram addresses in its TLB cache, we must
2199 reset the modified entries */
2200 /* XXX: slow ! */
2201 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2202 tlb_flush(env, 1);
2203 }
2204 }
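
/* Editor's note: an illustrative sketch (not part of the original source)
   of how a board model might call cpu_register_physical_memory().  The
   addresses and sizes are invented; IO_MEM_RAM / IO_MEM_ROM come from
   cpu-all.h and qemu_ram_alloc() is defined below. */
#if 0
static void example_register_board_memory(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 8 MB of RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* one page of ROM right above it */
    rom_offset = qemu_ram_alloc(TARGET_PAGE_SIZE);
    cpu_register_physical_memory(0x00800000, TARGET_PAGE_SIZE,
                                 rom_offset | IO_MEM_ROM);
}
#endif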
2205
2206 /* XXX: temporary until new memory mapping API */
2207 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2208 {
2209 PhysPageDesc *p;
2210
2211 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2212 if (!p)
2213 return IO_MEM_UNASSIGNED;
2214 return p->phys_offset;
2215 }
2216
2217 /* XXX: better than nothing */
2218 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2219 {
2220 ram_addr_t addr;
2221 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2222 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2223 (uint64_t)size, (uint64_t)phys_ram_size);
2224 abort();
2225 }
2226 addr = phys_ram_alloc_offset;
2227 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2228 return addr;
2229 }
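
/* Editor's note: an illustrative sketch (not part of the original source).
   The value returned by qemu_ram_alloc() is an offset into the
   preallocated phys_ram_base buffer, so device code can obtain a host
   pointer to its RAM; the size below is invented. */
#if 0
static uint8_t *example_alloc_vram(ram_addr_t *offset)
{
    *offset = qemu_ram_alloc(0x20000);      /* 128 KB of video RAM */
    return phys_ram_base + *offset;         /* host pointer to that RAM */
}
#endif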
2230
2231 void qemu_ram_free(ram_addr_t addr)
2232 {
2233 }
2234
2235 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2236 {
2237 #ifdef DEBUG_UNASSIGNED
2238 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2239 #endif
2240 #ifdef TARGET_SPARC
2241 do_unassigned_access(addr, 0, 0, 0);
2242 #elif TARGET_CRIS
2243 do_unassigned_access(addr, 0, 0, 0);
2244 #endif
2245 return 0;
2246 }
2247
2248 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2249 {
2250 #ifdef DEBUG_UNASSIGNED
2251 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2252 #endif
2253 #ifdef TARGET_SPARC
2254 do_unassigned_access(addr, 1, 0, 0);
2255 #elif TARGET_CRIS
2256 do_unassigned_access(addr, 1, 0, 0);
2257 #endif
2258 }
2259
2260 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2261 unassigned_mem_readb,
2262 unassigned_mem_readb,
2263 unassigned_mem_readb,
2264 };
2265
2266 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2267 unassigned_mem_writeb,
2268 unassigned_mem_writeb,
2269 unassigned_mem_writeb,
2270 };
2271
2272 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2273 {
2274 unsigned long ram_addr;
2275 int dirty_flags;
2276 ram_addr = addr - (unsigned long)phys_ram_base;
2277 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2278 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2279 #if !defined(CONFIG_USER_ONLY)
2280 tb_invalidate_phys_page_fast(ram_addr, 1);
2281 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2282 #endif
2283 }
2284 stb_p((uint8_t *)(long)addr, val);
2285 #ifdef USE_KQEMU
2286 if (cpu_single_env->kqemu_enabled &&
2287 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2288 kqemu_modify_page(cpu_single_env, ram_addr);
2289 #endif
2290 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2291 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2292 /* we remove the notdirty callback only if the code has been
2293 flushed */
2294 if (dirty_flags == 0xff)
2295 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2296 }
2297
2298 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2299 {
2300 unsigned long ram_addr;
2301 int dirty_flags;
2302 ram_addr = addr - (unsigned long)phys_ram_base;
2303 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2304 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2305 #if !defined(CONFIG_USER_ONLY)
2306 tb_invalidate_phys_page_fast(ram_addr, 2);
2307 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2308 #endif
2309 }
2310 stw_p((uint8_t *)(long)addr, val);
2311 #ifdef USE_KQEMU
2312 if (cpu_single_env->kqemu_enabled &&
2313 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2314 kqemu_modify_page(cpu_single_env, ram_addr);
2315 #endif
2316 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2317 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2318 /* we remove the notdirty callback only if the code has been
2319 flushed */
2320 if (dirty_flags == 0xff)
2321 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2322 }
2323
2324 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2325 {
2326 unsigned long ram_addr;
2327 int dirty_flags;
2328 ram_addr = addr - (unsigned long)phys_ram_base;
2329 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2330 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2331 #if !defined(CONFIG_USER_ONLY)
2332 tb_invalidate_phys_page_fast(ram_addr, 4);
2333 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2334 #endif
2335 }
2336 stl_p((uint8_t *)(long)addr, val);
2337 #ifdef USE_KQEMU
2338 if (cpu_single_env->kqemu_enabled &&
2339 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2340 kqemu_modify_page(cpu_single_env, ram_addr);
2341 #endif
2342 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2343 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2344 /* we remove the notdirty callback only if the code has been
2345 flushed */
2346 if (dirty_flags == 0xff)
2347 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2348 }
2349
2350 static CPUReadMemoryFunc *error_mem_read[3] = {
2351 NULL, /* never used */
2352 NULL, /* never used */
2353 NULL, /* never used */
2354 };
2355
2356 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2357 notdirty_mem_writeb,
2358 notdirty_mem_writew,
2359 notdirty_mem_writel,
2360 };
2361
2362 #if defined(CONFIG_SOFTMMU)
2363 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2364 so these check for a hit then pass through to the normal out-of-line
2365 phys routines. */
2366 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2367 {
2368 return ldub_phys(addr);
2369 }
2370
2371 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2372 {
2373 return lduw_phys(addr);
2374 }
2375
2376 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2377 {
2378 return ldl_phys(addr);
2379 }
2380
2381 /* Generate a debug exception if a watchpoint has been hit.
2382 Returns the real physical address of the access. addr will be a host
2383 address in case of a RAM location. */
2384 static target_ulong check_watchpoint(target_phys_addr_t addr)
2385 {
2386 CPUState *env = cpu_single_env;
2387 target_ulong watch;
2388 target_ulong retaddr;
2389 int i;
2390
2391 retaddr = addr;
2392 for (i = 0; i < env->nb_watchpoints; i++) {
2393 watch = env->watchpoint[i].vaddr;
2394 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2395 retaddr = addr - env->watchpoint[i].addend;
2396 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2397 cpu_single_env->watchpoint_hit = i + 1;
2398 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2399 break;
2400 }
2401 }
2402 }
2403 return retaddr;
2404 }
2405
2406 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2407 uint32_t val)
2408 {
2409 addr = check_watchpoint(addr);
2410 stb_phys(addr, val);
2411 }
2412
2413 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2414 uint32_t val)
2415 {
2416 addr = check_watchpoint(addr);
2417 stw_phys(addr, val);
2418 }
2419
2420 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2421 uint32_t val)
2422 {
2423 addr = check_watchpoint(addr);
2424 stl_phys(addr, val);
2425 }
2426
2427 static CPUReadMemoryFunc *watch_mem_read[3] = {
2428 watch_mem_readb,
2429 watch_mem_readw,
2430 watch_mem_readl,
2431 };
2432
2433 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2434 watch_mem_writeb,
2435 watch_mem_writew,
2436 watch_mem_writel,
2437 };
2438 #endif
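
/* Editor's note: an illustrative sketch (not part of the original source)
   of the watchpoint flow described above.  It assumes the
   cpu_watchpoint_insert() helper declared elsewhere in this file takes
   (env, vaddr); once a watchpoint is set, the TLB entry for that page is
   redirected to io_mem_watch, so guest accesses go through the
   watch_mem_* handlers and check_watchpoint(). */
#if 0
static void example_set_data_watchpoint(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr);
    /* a later guest store to vaddr raises CPU_INTERRUPT_DEBUG and sets
       env->watchpoint_hit */
}
#endif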
2439
2440 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2441 unsigned int len)
2442 {
2443 uint32_t ret;
2444 unsigned int idx;
2445
2446 idx = SUBPAGE_IDX(addr - mmio->base);
2447 #if defined(DEBUG_SUBPAGE)
2448 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2449 mmio, len, addr, idx);
2450 #endif
2451 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2452
2453 return ret;
2454 }
2455
2456 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2457 uint32_t value, unsigned int len)
2458 {
2459 unsigned int idx;
2460
2461 idx = SUBPAGE_IDX(addr - mmio->base);
2462 #if defined(DEBUG_SUBPAGE)
2463 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2464 mmio, len, addr, idx, value);
2465 #endif
2466 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2467 }
2468
2469 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2470 {
2471 #if defined(DEBUG_SUBPAGE)
2472 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2473 #endif
2474
2475 return subpage_readlen(opaque, addr, 0);
2476 }
2477
2478 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2479 uint32_t value)
2480 {
2481 #if defined(DEBUG_SUBPAGE)
2482 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2483 #endif
2484 subpage_writelen(opaque, addr, value, 0);
2485 }
2486
2487 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2488 {
2489 #if defined(DEBUG_SUBPAGE)
2490 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2491 #endif
2492
2493 return subpage_readlen(opaque, addr, 1);
2494 }
2495
2496 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2497 uint32_t value)
2498 {
2499 #if defined(DEBUG_SUBPAGE)
2500 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2501 #endif
2502 subpage_writelen(opaque, addr, value, 1);
2503 }
2504
2505 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2506 {
2507 #if defined(DEBUG_SUBPAGE)
2508 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2509 #endif
2510
2511 return subpage_readlen(opaque, addr, 2);
2512 }
2513
2514 static void subpage_writel (void *opaque,
2515 target_phys_addr_t addr, uint32_t value)
2516 {
2517 #if defined(DEBUG_SUBPAGE)
2518 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2519 #endif
2520 subpage_writelen(opaque, addr, value, 2);
2521 }
2522
2523 static CPUReadMemoryFunc *subpage_read[] = {
2524 &subpage_readb,
2525 &subpage_readw,
2526 &subpage_readl,
2527 };
2528
2529 static CPUWriteMemoryFunc *subpage_write[] = {
2530 &subpage_writeb,
2531 &subpage_writew,
2532 &subpage_writel,
2533 };
2534
2535 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2536 ram_addr_t memory)
2537 {
2538 int idx, eidx;
2539 unsigned int i;
2540
2541 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2542 return -1;
2543 idx = SUBPAGE_IDX(start);
2544 eidx = SUBPAGE_IDX(end);
2545 #if defined(DEBUG_SUBPAGE)
2546 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2547 mmio, start, end, idx, eidx, memory);
2548 #endif
2549 memory >>= IO_MEM_SHIFT;
2550 for (; idx <= eidx; idx++) {
2551 for (i = 0; i < 4; i++) {
2552 if (io_mem_read[memory][i]) {
2553 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2554 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2555 }
2556 if (io_mem_write[memory][i]) {
2557 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2558 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2559 }
2560 }
2561 }
2562
2563 return 0;
2564 }
2565
2566 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2567 ram_addr_t orig_memory)
2568 {
2569 subpage_t *mmio;
2570 int subpage_memory;
2571
2572 mmio = qemu_mallocz(sizeof(subpage_t));
2573 if (mmio != NULL) {
2574 mmio->base = base;
2575 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2576 #if defined(DEBUG_SUBPAGE)
2577 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2578 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2579 #endif
2580 *phys = subpage_memory | IO_MEM_SUBPAGE;
2581 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2582 }
2583
2584 return mmio;
2585 }
2586
2587 static void io_mem_init(void)
2588 {
2589 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2590 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2591 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2592 io_mem_nb = 5;
2593
2594 #if defined(CONFIG_SOFTMMU)
2595 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2596 watch_mem_write, NULL);
2597 #endif
2598 /* alloc dirty bits array */
2599 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2600 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2601 }
2602
2603 /* mem_read and mem_write are arrays of functions containing the
2604 function to access byte (index 0), word (index 1) and dword (index
2605 2). Functions can be omitted with a NULL function pointer. The
2606 registered functions may be modified dynamically later.
2607 If io_index is positive, the corresponding io zone is
2608 modified. If it is zero or negative, a new io zone is allocated. The
2609 return value can be used with cpu_register_physical_memory(). (-1) is
2610 returned on error. */
2611 int cpu_register_io_memory(int io_index,
2612 CPUReadMemoryFunc **mem_read,
2613 CPUWriteMemoryFunc **mem_write,
2614 void *opaque)
2615 {
2616 int i, subwidth = 0;
2617
2618 if (io_index <= 0) {
2619 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2620 return -1;
2621 io_index = io_mem_nb++;
2622 } else {
2623 if (io_index >= IO_MEM_NB_ENTRIES)
2624 return -1;
2625 }
2626
2627 for(i = 0;i < 3; i++) {
2628 if (!mem_read[i] || !mem_write[i])
2629 subwidth = IO_MEM_SUBWIDTH;
2630 io_mem_read[io_index][i] = mem_read[i];
2631 io_mem_write[io_index][i] = mem_write[i];
2632 }
2633 io_mem_opaque[io_index] = opaque;
2634 return (io_index << IO_MEM_SHIFT) | subwidth;
2635 }
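
/* Editor's note: an illustrative sketch (not part of the original source)
   of registering an MMIO region with the API above.  The device callbacks,
   state pointer and guest address are hypothetical; leaving the word/long
   entries NULL makes the region a subwidth one, handled via subpages. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;                               /* read a device register here */
}

static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* write a device register here */
}

static CPUReadMemoryFunc *mydev_read[3] = { mydev_readb, NULL, NULL };
static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, NULL, NULL };

static void example_map_mydev(void *dev_state)
{
    int iomemtype;

    /* io_index 0 asks for a new io zone to be allocated */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif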
2636
2637 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2638 {
2639 return io_mem_write[io_index >> IO_MEM_SHIFT];
2640 }
2641
2642 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2643 {
2644 return io_mem_read[io_index >> IO_MEM_SHIFT];
2645 }
2646
2647 /* physical memory access (slow version, mainly for debug) */
2648 #if defined(CONFIG_USER_ONLY)
2649 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2650 int len, int is_write)
2651 {
2652 int l, flags;
2653 target_ulong page;
2654 void * p;
2655
2656 while (len > 0) {
2657 page = addr & TARGET_PAGE_MASK;
2658 l = (page + TARGET_PAGE_SIZE) - addr;
2659 if (l > len)
2660 l = len;
2661 flags = page_get_flags(page);
2662 if (!(flags & PAGE_VALID))
2663 return;
2664 if (is_write) {
2665 if (!(flags & PAGE_WRITE))
2666 return;
2667 /* XXX: this code should not depend on lock_user */
2668 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2669 /* FIXME - should this return an error rather than just fail? */
2670 return;
2671 memcpy(p, buf, l);
2672 unlock_user(p, addr, l);
2673 } else {
2674 if (!(flags & PAGE_READ))
2675 return;
2676 /* XXX: this code should not depend on lock_user */
2677 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2678 /* FIXME - should this return an error rather than just fail? */
2679 return;
2680 memcpy(buf, p, l);
2681 unlock_user(p, addr, 0);
2682 }
2683 len -= l;
2684 buf += l;
2685 addr += l;
2686 }
2687 }
2688
2689 #else
2690 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2691 int len, int is_write)
2692 {
2693 int l, io_index;
2694 uint8_t *ptr;
2695 uint32_t val;
2696 target_phys_addr_t page;
2697 unsigned long pd;
2698 PhysPageDesc *p;
2699
2700 while (len > 0) {
2701 page = addr & TARGET_PAGE_MASK;
2702 l = (page + TARGET_PAGE_SIZE) - addr;
2703 if (l > len)
2704 l = len;
2705 p = phys_page_find(page >> TARGET_PAGE_BITS);
2706 if (!p) {
2707 pd = IO_MEM_UNASSIGNED;
2708 } else {
2709 pd = p->phys_offset;
2710 }
2711
2712 if (is_write) {
2713 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2714 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2715 /* XXX: could force cpu_single_env to NULL to avoid
2716 potential bugs */
2717 if (l >= 4 && ((addr & 3) == 0)) {
2718 /* 32 bit write access */
2719 val = ldl_p(buf);
2720 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2721 l = 4;
2722 } else if (l >= 2 && ((addr & 1) == 0)) {
2723 /* 16 bit write access */
2724 val = lduw_p(buf);
2725 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2726 l = 2;
2727 } else {
2728 /* 8 bit write access */
2729 val = ldub_p(buf);
2730 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2731 l = 1;
2732 }
2733 } else {
2734 unsigned long addr1;
2735 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2736 /* RAM case */
2737 ptr = phys_ram_base + addr1;
2738 memcpy(ptr, buf, l);
2739 if (!cpu_physical_memory_is_dirty(addr1)) {
2740 /* invalidate code */
2741 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2742 /* set dirty bit */
2743 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2744 (0xff & ~CODE_DIRTY_FLAG);
2745 }
2746 }
2747 } else {
2748 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2749 !(pd & IO_MEM_ROMD)) {
2750 /* I/O case */
2751 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2752 if (l >= 4 && ((addr & 3) == 0)) {
2753 /* 32 bit read access */
2754 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2755 stl_p(buf, val);
2756 l = 4;
2757 } else if (l >= 2 && ((addr & 1) == 0)) {
2758 /* 16 bit read access */
2759 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2760 stw_p(buf, val);
2761 l = 2;
2762 } else {
2763 /* 8 bit read access */
2764 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2765 stb_p(buf, val);
2766 l = 1;
2767 }
2768 } else {
2769 /* RAM case */
2770 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2771 (addr & ~TARGET_PAGE_MASK);
2772 memcpy(buf, ptr, l);
2773 }
2774 }
2775 len -= l;
2776 buf += l;
2777 addr += l;
2778 }
2779 }
2780
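/* Editor's note: an illustrative sketch (not part of the original source)
   of a DMA-style transfer through the slow path above, using the
   cpu_physical_memory_read()/write() wrappers from cpu-all.h; the guest
   physical addresses and the buffer size are invented. */
#if 0
static void example_dma_copy(void)
{
    uint8_t buf[512];

    cpu_physical_memory_read(0x00010000, buf, sizeof(buf));
    cpu_physical_memory_write(0x00020000, buf, sizeof(buf));
}
#endif
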
2781 /* used for ROM loading: can write in RAM and ROM */
2782 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2783 const uint8_t *buf, int len)
2784 {
2785 int l;
2786 uint8_t *ptr;
2787 target_phys_addr_t page;
2788 unsigned long pd;
2789 PhysPageDesc *p;
2790
2791 while (len > 0) {
2792 page = addr & TARGET_PAGE_MASK;
2793 l = (page + TARGET_PAGE_SIZE) - addr;
2794 if (l > len)
2795 l = len;
2796 p = phys_page_find(page >> TARGET_PAGE_BITS);
2797 if (!p) {
2798 pd = IO_MEM_UNASSIGNED;
2799 } else {
2800 pd = p->phys_offset;
2801 }
2802
2803 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2804 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2805 !(pd & IO_MEM_ROMD)) {
2806 /* do nothing */
2807 } else {
2808 unsigned long addr1;
2809 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2810 /* ROM/RAM case */
2811 ptr = phys_ram_base + addr1;
2812 memcpy(ptr, buf, l);
2813 }
2814 len -= l;
2815 buf += l;
2816 addr += l;
2817 }
2818 }
2819
2820
2821 /* warning: addr must be aligned */
2822 uint32_t ldl_phys(target_phys_addr_t addr)
2823 {
2824 int io_index;
2825 uint8_t *ptr;
2826 uint32_t val;
2827 unsigned long pd;
2828 PhysPageDesc *p;
2829
2830 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2831 if (!p) {
2832 pd = IO_MEM_UNASSIGNED;
2833 } else {
2834 pd = p->phys_offset;
2835 }
2836
2837 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2838 !(pd & IO_MEM_ROMD)) {
2839 /* I/O case */
2840 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2841 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2842 } else {
2843 /* RAM case */
2844 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2845 (addr & ~TARGET_PAGE_MASK);
2846 val = ldl_p(ptr);
2847 }
2848 return val;
2849 }
2850
2851 /* warning: addr must be aligned */
2852 uint64_t ldq_phys(target_phys_addr_t addr)
2853 {
2854 int io_index;
2855 uint8_t *ptr;
2856 uint64_t val;
2857 unsigned long pd;
2858 PhysPageDesc *p;
2859
2860 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2861 if (!p) {
2862 pd = IO_MEM_UNASSIGNED;
2863 } else {
2864 pd = p->phys_offset;
2865 }
2866
2867 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2868 !(pd & IO_MEM_ROMD)) {
2869 /* I/O case */
2870 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2871 #ifdef TARGET_WORDS_BIGENDIAN
2872 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2873 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2874 #else
2875 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2876 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2877 #endif
2878 } else {
2879 /* RAM case */
2880 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2881 (addr & ~TARGET_PAGE_MASK);
2882 val = ldq_p(ptr);
2883 }
2884 return val;
2885 }
2886
2887 /* XXX: optimize */
2888 uint32_t ldub_phys(target_phys_addr_t addr)
2889 {
2890 uint8_t val;
2891 cpu_physical_memory_read(addr, &val, 1);
2892 return val;
2893 }
2894
2895 /* XXX: optimize */
2896 uint32_t lduw_phys(target_phys_addr_t addr)
2897 {
2898 uint16_t val;
2899 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2900 return tswap16(val);
2901 }
2902
2903 /* warning: addr must be aligned. The ram page is not marked as dirty
2904 and the code inside is not invalidated. It is useful if the dirty
2905 bits are used to track modified PTEs */
2906 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2907 {
2908 int io_index;
2909 uint8_t *ptr;
2910 unsigned long pd;
2911 PhysPageDesc *p;
2912
2913 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2914 if (!p) {
2915 pd = IO_MEM_UNASSIGNED;
2916 } else {
2917 pd = p->phys_offset;
2918 }
2919
2920 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2921 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2922 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2923 } else {
2924 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2925 (addr & ~TARGET_PAGE_MASK);
2926 stl_p(ptr, val);
2927 }
2928 }
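
/* Editor's note: an illustrative sketch (not part of the original source)
   of the PTE-tracking use case mentioned above, as a target MMU helper
   might do it.  pte_addr and the 0x20 "accessed" bit are hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {
        /* update the PTE without flagging its RAM page as dirty, so the
           dirty bitmap keeps tracking guest page-table modifications */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif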
2929
2930 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2931 {
2932 int io_index;
2933 uint8_t *ptr;
2934 unsigned long pd;
2935 PhysPageDesc *p;
2936
2937 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2938 if (!p) {
2939 pd = IO_MEM_UNASSIGNED;
2940 } else {
2941 pd = p->phys_offset;
2942 }
2943
2944 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2945 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2946 #ifdef TARGET_WORDS_BIGENDIAN
2947 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2948 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2949 #else
2950 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2951 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2952 #endif
2953 } else {
2954 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2955 (addr & ~TARGET_PAGE_MASK);
2956 stq_p(ptr, val);
2957 }
2958 }
2959
2960 /* warning: addr must be aligned */
2961 void stl_phys(target_phys_addr_t addr, uint32_t val)
2962 {
2963 int io_index;
2964 uint8_t *ptr;
2965 unsigned long pd;
2966 PhysPageDesc *p;
2967
2968 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2969 if (!p) {
2970 pd = IO_MEM_UNASSIGNED;
2971 } else {
2972 pd = p->phys_offset;
2973 }
2974
2975 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2976 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2977 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2978 } else {
2979 unsigned long addr1;
2980 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2981 /* RAM case */
2982 ptr = phys_ram_base + addr1;
2983 stl_p(ptr, val);
2984 if (!cpu_physical_memory_is_dirty(addr1)) {
2985 /* invalidate code */
2986 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2987 /* set dirty bit */
2988 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2989 (0xff & ~CODE_DIRTY_FLAG);
2990 }
2991 }
2992 }
2993
2994 /* XXX: optimize */
2995 void stb_phys(target_phys_addr_t addr, uint32_t val)
2996 {
2997 uint8_t v = val;
2998 cpu_physical_memory_write(addr, &v, 1);
2999 }
3000
3001 /* XXX: optimize */
3002 void stw_phys(target_phys_addr_t addr, uint32_t val)
3003 {
3004 uint16_t v = tswap16(val);
3005 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3006 }
3007
3008 /* XXX: optimize */
3009 void stq_phys(target_phys_addr_t addr, uint64_t val)
3010 {
3011 val = tswap64(val);
3012 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3013 }
3014
3015 #endif
3016
3017 /* virtual memory access for debug */
3018 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3019 uint8_t *buf, int len, int is_write)
3020 {
3021 int l;
3022 target_phys_addr_t phys_addr;
3023 target_ulong page;
3024
3025 while (len > 0) {
3026 page = addr & TARGET_PAGE_MASK;
3027 phys_addr = cpu_get_phys_page_debug(env, page);
3028 /* if no physical page mapped, return an error */
3029 if (phys_addr == -1)
3030 return -1;
3031 l = (page + TARGET_PAGE_SIZE) - addr;
3032 if (l > len)
3033 l = len;
3034 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3035 buf, l, is_write);
3036 len -= l;
3037 buf += l;
3038 addr += l;
3039 }
3040 return 0;
3041 }
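
/* Editor's note: an illustrative sketch (not part of the original source)
   of how a debugger stub might use the routine above to read guest
   virtual memory; the wrapper name is invented. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif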
3042
3043 void dump_exec_info(FILE *f,
3044 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3045 {
3046 int i, target_code_size, max_target_code_size;
3047 int direct_jmp_count, direct_jmp2_count, cross_page;
3048 TranslationBlock *tb;
3049
3050 target_code_size = 0;
3051 max_target_code_size = 0;
3052 cross_page = 0;
3053 direct_jmp_count = 0;
3054 direct_jmp2_count = 0;
3055 for(i = 0; i < nb_tbs; i++) {
3056 tb = &tbs[i];
3057 target_code_size += tb->size;
3058 if (tb->size > max_target_code_size)
3059 max_target_code_size = tb->size;
3060 if (tb->page_addr[1] != -1)
3061 cross_page++;
3062 if (tb->tb_next_offset[0] != 0xffff) {
3063 direct_jmp_count++;
3064 if (tb->tb_next_offset[1] != 0xffff) {
3065 direct_jmp2_count++;
3066 }
3067 }
3068 }
3069 /* XXX: avoid using doubles ? */
3070 cpu_fprintf(f, "Translation buffer state:\n");
3071 cpu_fprintf(f, "gen code size %ld/%ld\n",
3072 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3073 cpu_fprintf(f, "TB count %d/%d\n",
3074 nb_tbs, code_gen_max_blocks);
3075 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3076 nb_tbs ? target_code_size / nb_tbs : 0,
3077 max_target_code_size);
3078 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3079 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3080 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3081 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3082 cross_page,
3083 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3084 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3085 direct_jmp_count,
3086 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3087 direct_jmp2_count,
3088 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3089 cpu_fprintf(f, "\nStatistics:\n");
3090 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3091 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3092 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3093 tcg_dump_info(f, cpu_fprintf);
3094 }
3095
3096 #if !defined(CONFIG_USER_ONLY)
3097
3098 #define MMUSUFFIX _cmmu
3099 #define GETPC() NULL
3100 #define env cpu_single_env
3101 #define SOFTMMU_CODE_ACCESS
3102
3103 #define SHIFT 0
3104 #include "softmmu_template.h"
3105
3106 #define SHIFT 1
3107 #include "softmmu_template.h"
3108
3109 #define SHIFT 2
3110 #include "softmmu_template.h"
3111
3112 #define SHIFT 3
3113 #include "softmmu_template.h"
3114
3115 #undef env
3116
3117 #endif