1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
43
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
48
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
52
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
55
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
60
61 #define SMC_BITMAP_USE_THRESHOLD 10
62
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
65
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
83
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
97
98 ram_addr_t phys_ram_size;
99 int phys_ram_fd;
100 uint8_t *phys_ram_base;
101 uint8_t *phys_ram_dirty;
102 static ram_addr_t phys_ram_alloc_offset = 0;
103
104 CPUState *first_cpu;
105 /* current CPU in the current thread. It is only valid inside
106 cpu_exec() */
107 CPUState *cpu_single_env;
108
109 typedef struct PageDesc {
110 /* list of TBs intersecting this ram page */
111 TranslationBlock *first_tb;
112 /* in order to optimize self modifying code, we count the number of code
113 invalidation requests for this page; past a threshold a bitmap is used */
114 unsigned int code_write_count;
115 uint8_t *code_bitmap;
116 #if defined(CONFIG_USER_ONLY)
117 unsigned long flags;
118 #endif
119 } PageDesc;
120
121 typedef struct PhysPageDesc {
122 /* offset in host memory of the page + io_index in the low 12 bits */
123 ram_addr_t phys_offset;
124 } PhysPageDesc;
125
126 #define L2_BITS 10
127 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128 /* XXX: this is a temporary hack for alpha target.
129 * In the future, this is to be replaced by a multi-level table
130 * to actually be able to handle the complete 64-bit address space.
131 */
132 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
133 #else
134 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135 #endif
136
137 #define L1_SIZE (1 << L1_BITS)
138 #define L2_SIZE (1 << L2_BITS)
139
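/* Worked example of the two-level split above (a sketch, assuming 4 KB
   target pages, i.e. TARGET_PAGE_BITS == 12, with the 32-bit layout):
   L1_BITS = 32 - 10 - 12 = 10, so both levels hold 1 << 10 = 1024 entries.
   A target page index then splits into
       l1_index = page_index >> L2_BITS        (top 10 bits)
       l2_index = page_index & (L2_SIZE - 1)   (low 10 bits)
   which is exactly how page_find_alloc() and page_find() below walk
   l1_map[]. */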
140 static void io_mem_init(void);
141
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
146
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
150
151 /* io memory support */
152 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
155 static int io_mem_nb;
156 #if defined(CONFIG_SOFTMMU)
157 static int io_mem_watch;
158 #endif
159
160 /* log support */
161 char *logfilename = "/tmp/qemu.log";
162 FILE *logfile;
163 int loglevel;
164 static int log_append = 0;
165
166 /* statistics */
167 static int tlb_flush_count;
168 static int tb_flush_count;
169 static int tb_phys_invalidate_count;
170
171 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
172 typedef struct subpage_t {
173 target_phys_addr_t base;
174 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
175 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
176 void *opaque[TARGET_PAGE_SIZE][2][4];
177 } subpage_t;
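/* For instance (a sketch, assuming 4 KB target pages): SUBPAGE_IDX(0x12345)
   masks off the page-aligned part and yields 0x345, the offset inside the
   page that indexes the per-subpage arrays of subpage_t above. */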
178
179 #ifdef _WIN32
180 static void map_exec(void *addr, long size)
181 {
182 DWORD old_protect;
183 VirtualProtect(addr, size,
184 PAGE_EXECUTE_READWRITE, &old_protect);
185
186 }
187 #else
188 static void map_exec(void *addr, long size)
189 {
190 unsigned long start, end, page_size;
191
192 page_size = getpagesize();
193 start = (unsigned long)addr;
194 start &= ~(page_size - 1);
195
196 end = (unsigned long)addr + size;
197 end += page_size - 1;
198 end &= ~(page_size - 1);
199
200 mprotect((void *)start, end - start,
201 PROT_READ | PROT_WRITE | PROT_EXEC);
202 }
203 #endif
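/* Worked example for the non-Win32 map_exec() above (a sketch, assuming a
   4 KB host page size): for addr = 0x4007f0 and size = 0x100, start is
   rounded down to 0x400000 and end up to 0x401000, so the single host page
   containing the range becomes PROT_READ | PROT_WRITE | PROT_EXEC. */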
204
205 static void page_init(void)
206 {
207 /* NOTE: we can always assume that qemu_host_page_size >=
208 TARGET_PAGE_SIZE */
209 #ifdef _WIN32
210 {
211 SYSTEM_INFO system_info;
212 DWORD old_protect;
213
214 GetSystemInfo(&system_info);
215 qemu_real_host_page_size = system_info.dwPageSize;
216 }
217 #else
218 qemu_real_host_page_size = getpagesize();
219 #endif
220 if (qemu_host_page_size == 0)
221 qemu_host_page_size = qemu_real_host_page_size;
222 if (qemu_host_page_size < TARGET_PAGE_SIZE)
223 qemu_host_page_size = TARGET_PAGE_SIZE;
224 qemu_host_page_bits = 0;
225 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
226 qemu_host_page_bits++;
227 qemu_host_page_mask = ~(qemu_host_page_size - 1);
228 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
229 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230
231 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 {
233 long long startaddr, endaddr;
234 FILE *f;
235 int n;
236
237 mmap_lock();
238 last_brk = (unsigned long)sbrk(0);
239 f = fopen("/proc/self/maps", "r");
240 if (f) {
241 do {
242 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
243 if (n == 2) {
244 startaddr = MIN(startaddr,
245 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
246 endaddr = MIN(endaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 page_set_flags(startaddr & TARGET_PAGE_MASK,
249 TARGET_PAGE_ALIGN(endaddr),
250 PAGE_RESERVED);
251 }
252 } while (!feof(f));
253 fclose(f);
254 }
255 mmap_unlock();
256 }
257 #endif
258 }
259
260 static inline PageDesc *page_find_alloc(target_ulong index)
261 {
262 PageDesc **lp, *p;
263
264 lp = &l1_map[index >> L2_BITS];
265 p = *lp;
266 if (!p) {
267 /* allocate if not found */
268 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
269 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
270 *lp = p;
271 }
272 return p + (index & (L2_SIZE - 1));
273 }
274
275 static inline PageDesc *page_find(target_ulong index)
276 {
277 PageDesc *p;
278
279 p = l1_map[index >> L2_BITS];
280 if (!p)
281 return 0;
282 return p + (index & (L2_SIZE - 1));
283 }
284
285 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
286 {
287 void **lp, **p;
288 PhysPageDesc *pd;
289
290 p = (void **)l1_phys_map;
291 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
292
293 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
294 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
295 #endif
296 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
297 p = *lp;
298 if (!p) {
299 /* allocate if not found */
300 if (!alloc)
301 return NULL;
302 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
303 memset(p, 0, sizeof(void *) * L1_SIZE);
304 *lp = p;
305 }
306 #endif
307 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
308 pd = *lp;
309 if (!pd) {
310 int i;
311 /* allocate if not found */
312 if (!alloc)
313 return NULL;
314 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
315 *lp = pd;
316 for (i = 0; i < L2_SIZE; i++)
317 pd[i].phys_offset = IO_MEM_UNASSIGNED;
318 }
319 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
320 }
321
322 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
323 {
324 return phys_page_find_alloc(index, 0);
325 }
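/* Worked example for phys_page_find_alloc() above (a sketch, assuming 4 KB
   target pages and TARGET_PHYS_ADDR_SPACE_BITS == 42): a physical page
   index has 30 significant bits and is consumed in three 10-bit slices:
   bits 20..29 pick the extra top-level table, bits 10..19 the intermediate
   table and bits 0..9 the PhysPageDesc leaf.  With a 32-bit physical
   address space the first slice is compiled out and l1_phys_map is indexed
   directly with bits 10..19. */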
326
327 #if !defined(CONFIG_USER_ONLY)
328 static void tlb_protect_code(ram_addr_t ram_addr);
329 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
330 target_ulong vaddr);
331 #define mmap_lock() do { } while(0)
332 #define mmap_unlock() do { } while(0)
333 #endif
334
335 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
336
337 #if defined(CONFIG_USER_ONLY)
338 /* Currently it is not recommended to allocate big chunks of data in
339 user mode. This will change once a dedicated libc is used */
340 #define USE_STATIC_CODE_GEN_BUFFER
341 #endif
342
343 #ifdef USE_STATIC_CODE_GEN_BUFFER
344 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
345 #endif
346
347 void code_gen_alloc(unsigned long tb_size)
348 {
349 #ifdef USE_STATIC_CODE_GEN_BUFFER
350 code_gen_buffer = static_code_gen_buffer;
351 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
352 map_exec(code_gen_buffer, code_gen_buffer_size);
353 #else
354 code_gen_buffer_size = tb_size;
355 if (code_gen_buffer_size == 0) {
356 #if defined(CONFIG_USER_ONLY)
357 /* in user mode, phys_ram_size is not meaningful */
358 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
359 #else
360 /* XXX: needs adjustments */
361 code_gen_buffer_size = (int)(phys_ram_size / 4);
362 #endif
363 }
364 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
365 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
366 /* The code gen buffer location may have constraints depending on
367 the host cpu and OS */
368 #if defined(__linux__)
369 {
370 int flags;
371 flags = MAP_PRIVATE | MAP_ANONYMOUS;
372 #if defined(__x86_64__)
373 flags |= MAP_32BIT;
374 /* Cannot map more than that */
375 if (code_gen_buffer_size > (800 * 1024 * 1024))
376 code_gen_buffer_size = (800 * 1024 * 1024);
377 #endif
378 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
379 PROT_WRITE | PROT_READ | PROT_EXEC,
380 flags, -1, 0);
381 if (code_gen_buffer == MAP_FAILED) {
382 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
383 exit(1);
384 }
385 }
386 #else
387 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
388 if (!code_gen_buffer) {
389 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
390 exit(1);
391 }
392 map_exec(code_gen_buffer, code_gen_buffer_size);
393 #endif
394 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
395 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
396 code_gen_buffer_max_size = code_gen_buffer_size -
397 code_gen_max_block_size();
398 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
399 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
400 }
401
402 /* Must be called before using the QEMU cpus. 'tb_size' is the size
403 (in bytes) allocated to the translation buffer. Zero means default
404 size. */
405 void cpu_exec_init_all(unsigned long tb_size)
406 {
407 cpu_gen_init();
408 code_gen_alloc(tb_size);
409 code_gen_ptr = code_gen_buffer;
410 page_init();
411 io_mem_init();
412 }
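#if 0
/* Illustrative use (a sketch): the machine start-up code calls this once,
   before any CPU is created.  Passing 0 keeps the default translation
   buffer size computed in code_gen_alloc(); a non-zero value, e.g.
   16 * 1024 * 1024, requests that many bytes instead. */
cpu_exec_init_all(0);
#endif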
413
414 void cpu_exec_init(CPUState *env)
415 {
416 CPUState **penv;
417 int cpu_index;
418
419 env->next_cpu = NULL;
420 penv = &first_cpu;
421 cpu_index = 0;
422 while (*penv != NULL) {
423 penv = (CPUState **)&(*penv)->next_cpu;
424 cpu_index++;
425 }
426 env->cpu_index = cpu_index;
427 env->nb_watchpoints = 0;
428 *penv = env;
429 }
430
431 static inline void invalidate_page_bitmap(PageDesc *p)
432 {
433 if (p->code_bitmap) {
434 qemu_free(p->code_bitmap);
435 p->code_bitmap = NULL;
436 }
437 p->code_write_count = 0;
438 }
439
440 /* set to NULL all the 'first_tb' fields in all PageDescs */
441 static void page_flush_tb(void)
442 {
443 int i, j;
444 PageDesc *p;
445
446 for(i = 0; i < L1_SIZE; i++) {
447 p = l1_map[i];
448 if (p) {
449 for(j = 0; j < L2_SIZE; j++) {
450 p->first_tb = NULL;
451 invalidate_page_bitmap(p);
452 p++;
453 }
454 }
455 }
456 }
457
458 /* flush all the translation blocks */
459 /* XXX: tb_flush is currently not thread safe */
460 void tb_flush(CPUState *env1)
461 {
462 CPUState *env;
463 #if defined(DEBUG_FLUSH)
464 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
465 (unsigned long)(code_gen_ptr - code_gen_buffer),
466 nb_tbs, nb_tbs > 0 ?
467 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
468 #endif
469 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
470 cpu_abort(env1, "Internal error: code buffer overflow\n");
471
472 nb_tbs = 0;
473
474 for(env = first_cpu; env != NULL; env = env->next_cpu) {
475 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
476 }
477
478 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
479 page_flush_tb();
480
481 code_gen_ptr = code_gen_buffer;
482 /* XXX: flush processor icache at this point if cache flush is
483 expensive */
484 tb_flush_count++;
485 }
486
487 #ifdef DEBUG_TB_CHECK
488
489 static void tb_invalidate_check(target_ulong address)
490 {
491 TranslationBlock *tb;
492 int i;
493 address &= TARGET_PAGE_MASK;
494 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
495 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
496 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
497 address >= tb->pc + tb->size)) {
498 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
499 address, (long)tb->pc, tb->size);
500 }
501 }
502 }
503 }
504
505 /* verify that all the pages have correct rights for code */
506 static void tb_page_check(void)
507 {
508 TranslationBlock *tb;
509 int i, flags1, flags2;
510
511 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
512 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
513 flags1 = page_get_flags(tb->pc);
514 flags2 = page_get_flags(tb->pc + tb->size - 1);
515 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
516 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
517 (long)tb->pc, tb->size, flags1, flags2);
518 }
519 }
520 }
521 }
522
523 void tb_jmp_check(TranslationBlock *tb)
524 {
525 TranslationBlock *tb1;
526 unsigned int n1;
527
528 /* suppress any remaining jumps to this TB */
529 tb1 = tb->jmp_first;
530 for(;;) {
531 n1 = (long)tb1 & 3;
532 tb1 = (TranslationBlock *)((long)tb1 & ~3);
533 if (n1 == 2)
534 break;
535 tb1 = tb1->jmp_next[n1];
536 }
537 /* check end of list */
538 if (tb1 != tb) {
539 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
540 }
541 }
542
543 #endif
544
545 /* invalidate one TB */
546 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
547 int next_offset)
548 {
549 TranslationBlock *tb1;
550 for(;;) {
551 tb1 = *ptb;
552 if (tb1 == tb) {
553 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
554 break;
555 }
556 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
557 }
558 }
559
560 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
561 {
562 TranslationBlock *tb1;
563 unsigned int n1;
564
565 for(;;) {
566 tb1 = *ptb;
567 n1 = (long)tb1 & 3;
568 tb1 = (TranslationBlock *)((long)tb1 & ~3);
569 if (tb1 == tb) {
570 *ptb = tb1->page_next[n1];
571 break;
572 }
573 ptb = &tb1->page_next[n1];
574 }
575 }
576
577 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
578 {
579 TranslationBlock *tb1, **ptb;
580 unsigned int n1;
581
582 ptb = &tb->jmp_next[n];
583 tb1 = *ptb;
584 if (tb1) {
585 /* find tb(n) in circular list */
586 for(;;) {
587 tb1 = *ptb;
588 n1 = (long)tb1 & 3;
589 tb1 = (TranslationBlock *)((long)tb1 & ~3);
590 if (n1 == n && tb1 == tb)
591 break;
592 if (n1 == 2) {
593 ptb = &tb1->jmp_first;
594 } else {
595 ptb = &tb1->jmp_next[n1];
596 }
597 }
598 /* now we can suppress tb(n) from the list */
599 *ptb = tb->jmp_next[n];
600
601 tb->jmp_next[n] = NULL;
602 }
603 }
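/* Note on the encoding used by the jump lists above and below (a reading
   aid, not new behaviour): the low two bits of a stored TranslationBlock
   pointer are a tag.  Values 0 and 1 say through which jmp_next[] slot of
   the pointed-to TB the circular list continues, and value 2 marks the
   head of the list, i.e. the owning TB itself.  Hence the recurring
       n1 = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);
   pattern that splits a link into its tag and the real pointer. */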
604
605 /* reset the jump entry 'n' of a TB so that it is not chained to
606 another TB */
607 static inline void tb_reset_jump(TranslationBlock *tb, int n)
608 {
609 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
610 }
611
612 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
613 {
614 CPUState *env;
615 PageDesc *p;
616 unsigned int h, n1;
617 target_phys_addr_t phys_pc;
618 TranslationBlock *tb1, *tb2;
619
620 /* remove the TB from the hash list */
621 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
622 h = tb_phys_hash_func(phys_pc);
623 tb_remove(&tb_phys_hash[h], tb,
624 offsetof(TranslationBlock, phys_hash_next));
625
626 /* remove the TB from the page list */
627 if (tb->page_addr[0] != page_addr) {
628 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
629 tb_page_remove(&p->first_tb, tb);
630 invalidate_page_bitmap(p);
631 }
632 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
633 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
634 tb_page_remove(&p->first_tb, tb);
635 invalidate_page_bitmap(p);
636 }
637
638 tb_invalidated_flag = 1;
639
640 /* remove the TB from the hash list */
641 h = tb_jmp_cache_hash_func(tb->pc);
642 for(env = first_cpu; env != NULL; env = env->next_cpu) {
643 if (env->tb_jmp_cache[h] == tb)
644 env->tb_jmp_cache[h] = NULL;
645 }
646
647 /* suppress this TB from the two jump lists */
648 tb_jmp_remove(tb, 0);
649 tb_jmp_remove(tb, 1);
650
651 /* suppress any remaining jumps to this TB */
652 tb1 = tb->jmp_first;
653 for(;;) {
654 n1 = (long)tb1 & 3;
655 if (n1 == 2)
656 break;
657 tb1 = (TranslationBlock *)((long)tb1 & ~3);
658 tb2 = tb1->jmp_next[n1];
659 tb_reset_jump(tb1, n1);
660 tb1->jmp_next[n1] = NULL;
661 tb1 = tb2;
662 }
663 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
664
665 tb_phys_invalidate_count++;
666 }
667
668 static inline void set_bits(uint8_t *tab, int start, int len)
669 {
670 int end, mask, end1;
671
672 end = start + len;
673 tab += start >> 3;
674 mask = 0xff << (start & 7);
675 if ((start & ~7) == (end & ~7)) {
676 if (start < end) {
677 mask &= ~(0xff << (end & 7));
678 *tab |= mask;
679 }
680 } else {
681 *tab++ |= mask;
682 start = (start + 8) & ~7;
683 end1 = end & ~7;
684 while (start < end1) {
685 *tab++ = 0xff;
686 start += 8;
687 }
688 if (start < end) {
689 mask = ~(0xff << (end & 7));
690 *tab |= mask;
691 }
692 }
693 }
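/* Worked example for set_bits() (a sketch): set_bits(tab, 3, 10) marks
   bits 3..12.  start and end (13) fall in different bytes, so the first
   byte is OR-ed with (0xff << 3) truncated to 0xf8 (bits 3..7), no full
   0xff bytes are needed, and the last byte is OR-ed with ~(0xff << 5)
   truncated to 0x1f, i.e. bits 8..12 of the overall bitmap. */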
694
695 static void build_page_bitmap(PageDesc *p)
696 {
697 int n, tb_start, tb_end;
698 TranslationBlock *tb;
699
700 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
701 if (!p->code_bitmap)
702 return;
703 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
704
705 tb = p->first_tb;
706 while (tb != NULL) {
707 n = (long)tb & 3;
708 tb = (TranslationBlock *)((long)tb & ~3);
709 /* NOTE: this is subtle as a TB may span two physical pages */
710 if (n == 0) {
711 /* NOTE: tb_end may be after the end of the page, but
712 it is not a problem */
713 tb_start = tb->pc & ~TARGET_PAGE_MASK;
714 tb_end = tb_start + tb->size;
715 if (tb_end > TARGET_PAGE_SIZE)
716 tb_end = TARGET_PAGE_SIZE;
717 } else {
718 tb_start = 0;
719 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
720 }
721 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
722 tb = tb->page_next[n];
723 }
724 }
725
726 #ifdef TARGET_HAS_PRECISE_SMC
727
728 static void tb_gen_code(CPUState *env,
729 target_ulong pc, target_ulong cs_base, int flags,
730 int cflags)
731 {
732 TranslationBlock *tb;
733 uint8_t *tc_ptr;
734 target_ulong phys_pc, phys_page2, virt_page2;
735 int code_gen_size;
736
737 phys_pc = get_phys_addr_code(env, pc);
738 tb = tb_alloc(pc);
739 if (!tb) {
740 /* flush must be done */
741 tb_flush(env);
742 /* cannot fail at this point */
743 tb = tb_alloc(pc);
744 }
745 tc_ptr = code_gen_ptr;
746 tb->tc_ptr = tc_ptr;
747 tb->cs_base = cs_base;
748 tb->flags = flags;
749 tb->cflags = cflags;
750 cpu_gen_code(env, tb, &code_gen_size);
751 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
752
753 /* check next page if needed */
754 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
755 phys_page2 = -1;
756 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
757 phys_page2 = get_phys_addr_code(env, virt_page2);
758 }
759 tb_link_phys(tb, phys_pc, phys_page2);
760 }
761 #endif
762
763 /* invalidate all TBs which intersect with the target physical page
764 starting in the range [start, end). NOTE: start and end must refer to
765 the same physical page. 'is_cpu_write_access' should be true if called
766 from a real cpu write access: the virtual CPU will exit the current
767 TB if code is modified inside this TB. */
768 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
769 int is_cpu_write_access)
770 {
771 int n, current_tb_modified, current_tb_not_found, current_flags;
772 CPUState *env = cpu_single_env;
773 PageDesc *p;
774 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
775 target_ulong tb_start, tb_end;
776 target_ulong current_pc, current_cs_base;
777
778 p = page_find(start >> TARGET_PAGE_BITS);
779 if (!p)
780 return;
781 if (!p->code_bitmap &&
782 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
783 is_cpu_write_access) {
784 /* build code bitmap */
785 build_page_bitmap(p);
786 }
787
788 /* we remove all the TBs in the range [start, end) */
789 /* XXX: see if in some cases it could be faster to invalidate all the code */
790 current_tb_not_found = is_cpu_write_access;
791 current_tb_modified = 0;
792 current_tb = NULL; /* avoid warning */
793 current_pc = 0; /* avoid warning */
794 current_cs_base = 0; /* avoid warning */
795 current_flags = 0; /* avoid warning */
796 tb = p->first_tb;
797 while (tb != NULL) {
798 n = (long)tb & 3;
799 tb = (TranslationBlock *)((long)tb & ~3);
800 tb_next = tb->page_next[n];
801 /* NOTE: this is subtle as a TB may span two physical pages */
802 if (n == 0) {
803 /* NOTE: tb_end may be after the end of the page, but
804 it is not a problem */
805 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
806 tb_end = tb_start + tb->size;
807 } else {
808 tb_start = tb->page_addr[1];
809 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
810 }
811 if (!(tb_end <= start || tb_start >= end)) {
812 #ifdef TARGET_HAS_PRECISE_SMC
813 if (current_tb_not_found) {
814 current_tb_not_found = 0;
815 current_tb = NULL;
816 if (env->mem_write_pc) {
817 /* now we have a real cpu fault */
818 current_tb = tb_find_pc(env->mem_write_pc);
819 }
820 }
821 if (current_tb == tb &&
822 !(current_tb->cflags & CF_SINGLE_INSN)) {
823 /* If we are modifying the current TB, we must stop
824 its execution. We could be more precise by checking
825 that the modification is after the current PC, but it
826 would require a specialized function to partially
827 restore the CPU state */
828
829 current_tb_modified = 1;
830 cpu_restore_state(current_tb, env,
831 env->mem_write_pc, NULL);
832 #if defined(TARGET_I386)
833 current_flags = env->hflags;
834 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
835 current_cs_base = (target_ulong)env->segs[R_CS].base;
836 current_pc = current_cs_base + env->eip;
837 #else
838 #error unsupported CPU
839 #endif
840 }
841 #endif /* TARGET_HAS_PRECISE_SMC */
842 /* we need to do that to handle the case where a signal
843 occurs while doing tb_phys_invalidate() */
844 saved_tb = NULL;
845 if (env) {
846 saved_tb = env->current_tb;
847 env->current_tb = NULL;
848 }
849 tb_phys_invalidate(tb, -1);
850 if (env) {
851 env->current_tb = saved_tb;
852 if (env->interrupt_request && env->current_tb)
853 cpu_interrupt(env, env->interrupt_request);
854 }
855 }
856 tb = tb_next;
857 }
858 #if !defined(CONFIG_USER_ONLY)
859 /* if no code remaining, no need to continue to use slow writes */
860 if (!p->first_tb) {
861 invalidate_page_bitmap(p);
862 if (is_cpu_write_access) {
863 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
864 }
865 }
866 #endif
867 #ifdef TARGET_HAS_PRECISE_SMC
868 if (current_tb_modified) {
869 /* we generate a block containing just the instruction
870 modifying the memory. It will ensure that it cannot modify
871 itself */
872 env->current_tb = NULL;
873 tb_gen_code(env, current_pc, current_cs_base, current_flags,
874 CF_SINGLE_INSN);
875 cpu_resume_from_signal(env, NULL);
876 }
877 #endif
878 }
879
880 /* len must be <= 8 and start must be a multiple of len */
881 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
882 {
883 PageDesc *p;
884 int offset, b;
885 #if 0
886 if (1) {
887 if (loglevel) {
888 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
889 cpu_single_env->mem_write_vaddr, len,
890 cpu_single_env->eip,
891 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
892 }
893 }
894 #endif
895 p = page_find(start >> TARGET_PAGE_BITS);
896 if (!p)
897 return;
898 if (p->code_bitmap) {
899 offset = start & ~TARGET_PAGE_MASK;
900 b = p->code_bitmap[offset >> 3] >> (offset & 7);
901 if (b & ((1 << len) - 1))
902 goto do_invalidate;
903 } else {
904 do_invalidate:
905 tb_invalidate_phys_page_range(start, start + len, 1);
906 }
907 }
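/* Worked example for the bitmap test above (a sketch): for a 4-byte store
   at page offset 0x104, offset >> 3 selects byte 0x20 of code_bitmap, the
   byte is shifted right by offset & 7 = 4, and the low len = 4 bits are
   tested.  Only if one of them is set, i.e. only if translated code
   overlaps those four bytes, is the slower
   tb_invalidate_phys_page_range() path taken. */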
908
909 #if !defined(CONFIG_SOFTMMU)
910 static void tb_invalidate_phys_page(target_phys_addr_t addr,
911 unsigned long pc, void *puc)
912 {
913 int n, current_flags, current_tb_modified;
914 target_ulong current_pc, current_cs_base;
915 PageDesc *p;
916 TranslationBlock *tb, *current_tb;
917 #ifdef TARGET_HAS_PRECISE_SMC
918 CPUState *env = cpu_single_env;
919 #endif
920
921 addr &= TARGET_PAGE_MASK;
922 p = page_find(addr >> TARGET_PAGE_BITS);
923 if (!p)
924 return;
925 tb = p->first_tb;
926 current_tb_modified = 0;
927 current_tb = NULL;
928 current_pc = 0; /* avoid warning */
929 current_cs_base = 0; /* avoid warning */
930 current_flags = 0; /* avoid warning */
931 #ifdef TARGET_HAS_PRECISE_SMC
932 if (tb && pc != 0) {
933 current_tb = tb_find_pc(pc);
934 }
935 #endif
936 while (tb != NULL) {
937 n = (long)tb & 3;
938 tb = (TranslationBlock *)((long)tb & ~3);
939 #ifdef TARGET_HAS_PRECISE_SMC
940 if (current_tb == tb &&
941 !(current_tb->cflags & CF_SINGLE_INSN)) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
947
948 current_tb_modified = 1;
949 cpu_restore_state(current_tb, env, pc, puc);
950 #if defined(TARGET_I386)
951 current_flags = env->hflags;
952 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
953 current_cs_base = (target_ulong)env->segs[R_CS].base;
954 current_pc = current_cs_base + env->eip;
955 #else
956 #error unsupported CPU
957 #endif
958 }
959 #endif /* TARGET_HAS_PRECISE_SMC */
960 tb_phys_invalidate(tb, addr);
961 tb = tb->page_next[n];
962 }
963 p->first_tb = NULL;
964 #ifdef TARGET_HAS_PRECISE_SMC
965 if (current_tb_modified) {
966 /* we generate a block containing just the instruction
967 modifying the memory. It will ensure that it cannot modify
968 itself */
969 env->current_tb = NULL;
970 tb_gen_code(env, current_pc, current_cs_base, current_flags,
971 CF_SINGLE_INSN);
972 cpu_resume_from_signal(env, puc);
973 }
974 #endif
975 }
976 #endif
977
978 /* add the tb in the target page and protect it if necessary */
979 static inline void tb_alloc_page(TranslationBlock *tb,
980 unsigned int n, target_ulong page_addr)
981 {
982 PageDesc *p;
983 TranslationBlock *last_first_tb;
984
985 tb->page_addr[n] = page_addr;
986 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
987 tb->page_next[n] = p->first_tb;
988 last_first_tb = p->first_tb;
989 p->first_tb = (TranslationBlock *)((long)tb | n);
990 invalidate_page_bitmap(p);
991
992 #if defined(TARGET_HAS_SMC) || 1
993
994 #if defined(CONFIG_USER_ONLY)
995 if (p->flags & PAGE_WRITE) {
996 target_ulong addr;
997 PageDesc *p2;
998 int prot;
999
1000 /* force the host page as non writable (writes will have a
1001 page fault + mprotect overhead) */
1002 page_addr &= qemu_host_page_mask;
1003 prot = 0;
1004 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1005 addr += TARGET_PAGE_SIZE) {
1006
1007 p2 = page_find (addr >> TARGET_PAGE_BITS);
1008 if (!p2)
1009 continue;
1010 prot |= p2->flags;
1011 p2->flags &= ~PAGE_WRITE;
1012 page_get_flags(addr);
1013 }
1014 mprotect(g2h(page_addr), qemu_host_page_size,
1015 (prot & PAGE_BITS) & ~PAGE_WRITE);
1016 #ifdef DEBUG_TB_INVALIDATE
1017 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1018 page_addr);
1019 #endif
1020 }
1021 #else
1022 /* if some code is already present, then the pages are already
1023 protected. So we handle the case where only the first TB is
1024 allocated in a physical page */
1025 if (!last_first_tb) {
1026 tlb_protect_code(page_addr);
1027 }
1028 #endif
1029
1030 #endif /* TARGET_HAS_SMC */
1031 }
1032
1033 /* Allocate a new translation block. Flush the translation buffer if
1034 too many translation blocks or too much generated code. */
1035 TranslationBlock *tb_alloc(target_ulong pc)
1036 {
1037 TranslationBlock *tb;
1038
1039 if (nb_tbs >= code_gen_max_blocks ||
1040 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1041 return NULL;
1042 tb = &tbs[nb_tbs++];
1043 tb->pc = pc;
1044 tb->cflags = 0;
1045 return tb;
1046 }
1047
1048 /* add a new TB and link it to the physical page tables. phys_page2 is
1049 (-1) to indicate that only one page contains the TB. */
1050 void tb_link_phys(TranslationBlock *tb,
1051 target_ulong phys_pc, target_ulong phys_page2)
1052 {
1053 unsigned int h;
1054 TranslationBlock **ptb;
1055
1056 /* Grab the mmap lock to stop another thread invalidating this TB
1057 before we are done. */
1058 mmap_lock();
1059 /* add in the physical hash table */
1060 h = tb_phys_hash_func(phys_pc);
1061 ptb = &tb_phys_hash[h];
1062 tb->phys_hash_next = *ptb;
1063 *ptb = tb;
1064
1065 /* add in the page list */
1066 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1067 if (phys_page2 != -1)
1068 tb_alloc_page(tb, 1, phys_page2);
1069 else
1070 tb->page_addr[1] = -1;
1071
1072 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1073 tb->jmp_next[0] = NULL;
1074 tb->jmp_next[1] = NULL;
1075
1076 /* init original jump addresses */
1077 if (tb->tb_next_offset[0] != 0xffff)
1078 tb_reset_jump(tb, 0);
1079 if (tb->tb_next_offset[1] != 0xffff)
1080 tb_reset_jump(tb, 1);
1081
1082 #ifdef DEBUG_TB_CHECK
1083 tb_page_check();
1084 #endif
1085 mmap_unlock();
1086 }
1087
1088 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1089 tb[1].tc_ptr. Return NULL if not found */
1090 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1091 {
1092 int m_min, m_max, m;
1093 unsigned long v;
1094 TranslationBlock *tb;
1095
1096 if (nb_tbs <= 0)
1097 return NULL;
1098 if (tc_ptr < (unsigned long)code_gen_buffer ||
1099 tc_ptr >= (unsigned long)code_gen_ptr)
1100 return NULL;
1101 /* binary search (cf Knuth) */
1102 m_min = 0;
1103 m_max = nb_tbs - 1;
1104 while (m_min <= m_max) {
1105 m = (m_min + m_max) >> 1;
1106 tb = &tbs[m];
1107 v = (unsigned long)tb->tc_ptr;
1108 if (v == tc_ptr)
1109 return tb;
1110 else if (tc_ptr < v) {
1111 m_max = m - 1;
1112 } else {
1113 m_min = m + 1;
1114 }
1115 }
1116 return &tbs[m_max];
1117 }
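#if 0
/* Illustrative use (a sketch; 'env' and 'host_pc', the faulting host PC
   taken from a signal handler, are assumed inputs): map the PC back to
   the TB whose generated code contains it, then roll the CPU state back
   to the corresponding guest instruction. */
TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
if (tb)
    cpu_restore_state(tb, env, (unsigned long)host_pc, NULL);
#endif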
1118
1119 static void tb_reset_jump_recursive(TranslationBlock *tb);
1120
1121 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1122 {
1123 TranslationBlock *tb1, *tb_next, **ptb;
1124 unsigned int n1;
1125
1126 tb1 = tb->jmp_next[n];
1127 if (tb1 != NULL) {
1128 /* find head of list */
1129 for(;;) {
1130 n1 = (long)tb1 & 3;
1131 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1132 if (n1 == 2)
1133 break;
1134 tb1 = tb1->jmp_next[n1];
1135 }
1136 /* we are now sure that tb jumps to tb1 */
1137 tb_next = tb1;
1138
1139 /* remove tb from the jmp_first list */
1140 ptb = &tb_next->jmp_first;
1141 for(;;) {
1142 tb1 = *ptb;
1143 n1 = (long)tb1 & 3;
1144 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1145 if (n1 == n && tb1 == tb)
1146 break;
1147 ptb = &tb1->jmp_next[n1];
1148 }
1149 *ptb = tb->jmp_next[n];
1150 tb->jmp_next[n] = NULL;
1151
1152 /* suppress the jump to next tb in generated code */
1153 tb_reset_jump(tb, n);
1154
1155 /* also reset the jumps of the tb we jumped to */
1156 tb_reset_jump_recursive(tb_next);
1157 }
1158 }
1159
1160 static void tb_reset_jump_recursive(TranslationBlock *tb)
1161 {
1162 tb_reset_jump_recursive2(tb, 0);
1163 tb_reset_jump_recursive2(tb, 1);
1164 }
1165
1166 #if defined(TARGET_HAS_ICE)
1167 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1168 {
1169 target_phys_addr_t addr;
1170 target_ulong pd;
1171 ram_addr_t ram_addr;
1172 PhysPageDesc *p;
1173
1174 addr = cpu_get_phys_page_debug(env, pc);
1175 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1176 if (!p) {
1177 pd = IO_MEM_UNASSIGNED;
1178 } else {
1179 pd = p->phys_offset;
1180 }
1181 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1182 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1183 }
1184 #endif
1185
1186 /* Add a watchpoint. */
1187 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1188 {
1189 int i;
1190
1191 for (i = 0; i < env->nb_watchpoints; i++) {
1192 if (addr == env->watchpoint[i].vaddr)
1193 return 0;
1194 }
1195 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1196 return -1;
1197
1198 i = env->nb_watchpoints++;
1199 env->watchpoint[i].vaddr = addr;
1200 tlb_flush_page(env, addr);
1201 /* FIXME: This flush is needed because of the hack to make memory ops
1202 terminate the TB. It can be removed once the proper IO trap and
1203 re-execute bits are in. */
1204 tb_flush(env);
1205 return i;
1206 }
1207
1208 /* Remove a watchpoint. */
1209 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1210 {
1211 int i;
1212
1213 for (i = 0; i < env->nb_watchpoints; i++) {
1214 if (addr == env->watchpoint[i].vaddr) {
1215 env->nb_watchpoints--;
1216 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1217 tlb_flush_page(env, addr);
1218 return 0;
1219 }
1220 }
1221 return -1;
1222 }
1223
1224 /* Remove all watchpoints. */
1225 void cpu_watchpoint_remove_all(CPUState *env) {
1226 int i;
1227
1228 for (i = 0; i < env->nb_watchpoints; i++) {
1229 tlb_flush_page(env, env->watchpoint[i].vaddr);
1230 }
1231 env->nb_watchpoints = 0;
1232 }
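#if 0
/* Illustrative use of the watchpoint API above (a sketch; 'env' and
   'guest_addr' are assumed to come from the caller, e.g. the gdb stub): */
if (cpu_watchpoint_insert(env, guest_addr) < 0)
    return -1;                      /* MAX_WATCHPOINTS already in use */
/* ... run, handle the watchpoint hit, then ... */
cpu_watchpoint_remove(env, guest_addr);
#endif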
1233
1234 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1235 breakpoint is reached */
1236 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1237 {
1238 #if defined(TARGET_HAS_ICE)
1239 int i;
1240
1241 for(i = 0; i < env->nb_breakpoints; i++) {
1242 if (env->breakpoints[i] == pc)
1243 return 0;
1244 }
1245
1246 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1247 return -1;
1248 env->breakpoints[env->nb_breakpoints++] = pc;
1249
1250 breakpoint_invalidate(env, pc);
1251 return 0;
1252 #else
1253 return -1;
1254 #endif
1255 }
1256
1257 /* remove all breakpoints */
1258 void cpu_breakpoint_remove_all(CPUState *env) {
1259 #if defined(TARGET_HAS_ICE)
1260 int i;
1261 for(i = 0; i < env->nb_breakpoints; i++) {
1262 breakpoint_invalidate(env, env->breakpoints[i]);
1263 }
1264 env->nb_breakpoints = 0;
1265 #endif
1266 }
1267
1268 /* remove a breakpoint */
1269 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1270 {
1271 #if defined(TARGET_HAS_ICE)
1272 int i;
1273 for(i = 0; i < env->nb_breakpoints; i++) {
1274 if (env->breakpoints[i] == pc)
1275 goto found;
1276 }
1277 return -1;
1278 found:
1279 env->nb_breakpoints--;
1280 if (i < env->nb_breakpoints)
1281 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1282
1283 breakpoint_invalidate(env, pc);
1284 return 0;
1285 #else
1286 return -1;
1287 #endif
1288 }
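#if 0
/* Illustrative use (a sketch; 'env' and 'pc' are assumed inputs from a
   debugger front end): once the breakpoint is inserted, the CPU loop
   returns EXCP_DEBUG when 'pc' is reached. */
if (cpu_breakpoint_insert(env, pc) < 0)
    return -1;          /* table full, or target lacks TARGET_HAS_ICE */
/* ... run, handle EXCP_DEBUG, then ... */
cpu_breakpoint_remove(env, pc);
#endif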
1289
1290 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1291 CPU loop after each instruction */
1292 void cpu_single_step(CPUState *env, int enabled)
1293 {
1294 #if defined(TARGET_HAS_ICE)
1295 if (env->singlestep_enabled != enabled) {
1296 env->singlestep_enabled = enabled;
1297 /* must flush all the translated code to avoid inconsistencies */
1298 /* XXX: only flush what is necessary */
1299 tb_flush(env);
1300 }
1301 #endif
1302 }
1303
1304 /* enable or disable low level logging */
1305 void cpu_set_log(int log_flags)
1306 {
1307 loglevel = log_flags;
1308 if (loglevel && !logfile) {
1309 logfile = fopen(logfilename, log_append ? "a" : "w");
1310 if (!logfile) {
1311 perror(logfilename);
1312 _exit(1);
1313 }
1314 #if !defined(CONFIG_SOFTMMU)
1315 /* must avoid glibc's mmap() usage by setting a buffer "by hand" */
1316 {
1317 static uint8_t logfile_buf[4096];
1318 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1319 }
1320 #else
1321 setvbuf(logfile, NULL, _IOLBF, 0);
1322 #endif
1323 log_append = 1;
1324 }
1325 if (!loglevel && logfile) {
1326 fclose(logfile);
1327 logfile = NULL;
1328 }
1329 }
1330
1331 void cpu_set_log_filename(const char *filename)
1332 {
1333 logfilename = strdup(filename);
1334 if (logfile) {
1335 fclose(logfile);
1336 logfile = NULL;
1337 }
1338 cpu_set_log(loglevel);
1339 }
1340
1341 /* mask must never be zero, except for A20 change call */
1342 void cpu_interrupt(CPUState *env, int mask)
1343 {
1344 #if !defined(USE_NPTL)
1345 TranslationBlock *tb;
1346 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1347 #endif
1348
1349 /* FIXME: This is probably not thread safe. A different thread could
1350 be in the middle of a read-modify-write operation. */
1351 env->interrupt_request |= mask;
1352 #if defined(USE_NPTL)
1353 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1354 problem and hope the cpu will stop of its own accord. For userspace
1355 emulation this often isn't actually as bad as it sounds. Often
1356 signals are used primarily to interrupt blocking syscalls. */
1357 #else
1358 /* if the cpu is currently executing code, we must unlink it and
1359 all the potentially executing TB */
1360 tb = env->current_tb;
1361 if (tb && !testandset(&interrupt_lock)) {
1362 env->current_tb = NULL;
1363 tb_reset_jump_recursive(tb);
1364 resetlock(&interrupt_lock);
1365 }
1366 #endif
1367 }
1368
1369 void cpu_reset_interrupt(CPUState *env, int mask)
1370 {
1371 env->interrupt_request &= ~mask;
1372 }
1373
1374 CPULogItem cpu_log_items[] = {
1375 { CPU_LOG_TB_OUT_ASM, "out_asm",
1376 "show generated host assembly code for each compiled TB" },
1377 { CPU_LOG_TB_IN_ASM, "in_asm",
1378 "show target assembly code for each compiled TB" },
1379 { CPU_LOG_TB_OP, "op",
1380 "show micro ops for each compiled TB" },
1381 { CPU_LOG_TB_OP_OPT, "op_opt",
1382 "show micro ops "
1383 #ifdef TARGET_I386
1384 "before eflags optimization and "
1385 #endif
1386 "after liveness analysis" },
1387 { CPU_LOG_INT, "int",
1388 "show interrupts/exceptions in short format" },
1389 { CPU_LOG_EXEC, "exec",
1390 "show trace before each executed TB (lots of logs)" },
1391 { CPU_LOG_TB_CPU, "cpu",
1392 "show CPU state before block translation" },
1393 #ifdef TARGET_I386
1394 { CPU_LOG_PCALL, "pcall",
1395 "show protected mode far calls/returns/exceptions" },
1396 #endif
1397 #ifdef DEBUG_IOPORT
1398 { CPU_LOG_IOPORT, "ioport",
1399 "show all i/o ports accesses" },
1400 #endif
1401 { 0, NULL, NULL },
1402 };
1403
1404 static int cmp1(const char *s1, int n, const char *s2)
1405 {
1406 if (strlen(s2) != n)
1407 return 0;
1408 return memcmp(s1, s2, n) == 0;
1409 }
1410
1411 /* takes a comma separated list of log masks. Returns 0 on error. */
1412 int cpu_str_to_log_mask(const char *str)
1413 {
1414 CPULogItem *item;
1415 int mask;
1416 const char *p, *p1;
1417
1418 p = str;
1419 mask = 0;
1420 for(;;) {
1421 p1 = strchr(p, ',');
1422 if (!p1)
1423 p1 = p + strlen(p);
1424 if(cmp1(p,p1-p,"all")) {
1425 for(item = cpu_log_items; item->mask != 0; item++) {
1426 mask |= item->mask;
1427 }
1428 } else {
1429 for(item = cpu_log_items; item->mask != 0; item++) {
1430 if (cmp1(p, p1 - p, item->name))
1431 goto found;
1432 }
1433 return 0;
1434 }
1435 found:
1436 mask |= item->mask;
1437 if (*p1 != ',')
1438 break;
1439 p = p1 + 1;
1440 }
1441 return mask;
1442 }
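#if 0
/* Illustrative use (a sketch): turn a '-d' style option string into a
   mask and enable logging with it. */
int mask = cpu_str_to_log_mask("in_asm,op,int");
if (!mask)
    fprintf(stderr, "unknown log item\n");
else
    cpu_set_log(mask);
#endif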
1443
1444 void cpu_abort(CPUState *env, const char *fmt, ...)
1445 {
1446 va_list ap;
1447 va_list ap2;
1448
1449 va_start(ap, fmt);
1450 va_copy(ap2, ap);
1451 fprintf(stderr, "qemu: fatal: ");
1452 vfprintf(stderr, fmt, ap);
1453 fprintf(stderr, "\n");
1454 #ifdef TARGET_I386
1455 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1456 #else
1457 cpu_dump_state(env, stderr, fprintf, 0);
1458 #endif
1459 if (logfile) {
1460 fprintf(logfile, "qemu: fatal: ");
1461 vfprintf(logfile, fmt, ap2);
1462 fprintf(logfile, "\n");
1463 #ifdef TARGET_I386
1464 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1465 #else
1466 cpu_dump_state(env, logfile, fprintf, 0);
1467 #endif
1468 fflush(logfile);
1469 fclose(logfile);
1470 }
1471 va_end(ap2);
1472 va_end(ap);
1473 abort();
1474 }
1475
1476 CPUState *cpu_copy(CPUState *env)
1477 {
1478 CPUState *new_env = cpu_init(env->cpu_model_str);
1479 /* preserve chaining and index */
1480 CPUState *next_cpu = new_env->next_cpu;
1481 int cpu_index = new_env->cpu_index;
1482 memcpy(new_env, env, sizeof(CPUState));
1483 new_env->next_cpu = next_cpu;
1484 new_env->cpu_index = cpu_index;
1485 return new_env;
1486 }
1487
1488 #if !defined(CONFIG_USER_ONLY)
1489
1490 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1491 {
1492 unsigned int i;
1493
1494 /* Discard jump cache entries for any tb which might potentially
1495 overlap the flushed page. */
1496 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1497 memset (&env->tb_jmp_cache[i], 0,
1498 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1499
1500 i = tb_jmp_cache_hash_page(addr);
1501 memset (&env->tb_jmp_cache[i], 0,
1502 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1503 }
1504
1505 /* NOTE: if flush_global is true, also flush global entries (not
1506 implemented yet) */
1507 void tlb_flush(CPUState *env, int flush_global)
1508 {
1509 int i;
1510
1511 #if defined(DEBUG_TLB)
1512 printf("tlb_flush:\n");
1513 #endif
1514 /* must reset current TB so that interrupts cannot modify the
1515 links while we are modifying them */
1516 env->current_tb = NULL;
1517
1518 for(i = 0; i < CPU_TLB_SIZE; i++) {
1519 env->tlb_table[0][i].addr_read = -1;
1520 env->tlb_table[0][i].addr_write = -1;
1521 env->tlb_table[0][i].addr_code = -1;
1522 env->tlb_table[1][i].addr_read = -1;
1523 env->tlb_table[1][i].addr_write = -1;
1524 env->tlb_table[1][i].addr_code = -1;
1525 #if (NB_MMU_MODES >= 3)
1526 env->tlb_table[2][i].addr_read = -1;
1527 env->tlb_table[2][i].addr_write = -1;
1528 env->tlb_table[2][i].addr_code = -1;
1529 #if (NB_MMU_MODES == 4)
1530 env->tlb_table[3][i].addr_read = -1;
1531 env->tlb_table[3][i].addr_write = -1;
1532 env->tlb_table[3][i].addr_code = -1;
1533 #endif
1534 #endif
1535 }
1536
1537 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1538
1539 #if !defined(CONFIG_SOFTMMU)
1540 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1541 #endif
1542 #ifdef USE_KQEMU
1543 if (env->kqemu_enabled) {
1544 kqemu_flush(env, flush_global);
1545 }
1546 #endif
1547 tlb_flush_count++;
1548 }
1549
1550 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1551 {
1552 if (addr == (tlb_entry->addr_read &
1553 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1554 addr == (tlb_entry->addr_write &
1555 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1556 addr == (tlb_entry->addr_code &
1557 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1558 tlb_entry->addr_read = -1;
1559 tlb_entry->addr_write = -1;
1560 tlb_entry->addr_code = -1;
1561 }
1562 }
1563
1564 void tlb_flush_page(CPUState *env, target_ulong addr)
1565 {
1566 int i;
1567
1568 #if defined(DEBUG_TLB)
1569 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1570 #endif
1571 /* must reset current TB so that interrupts cannot modify the
1572 links while we are modifying them */
1573 env->current_tb = NULL;
1574
1575 addr &= TARGET_PAGE_MASK;
1576 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1577 tlb_flush_entry(&env->tlb_table[0][i], addr);
1578 tlb_flush_entry(&env->tlb_table[1][i], addr);
1579 #if (NB_MMU_MODES >= 3)
1580 tlb_flush_entry(&env->tlb_table[2][i], addr);
1581 #if (NB_MMU_MODES == 4)
1582 tlb_flush_entry(&env->tlb_table[3][i], addr);
1583 #endif
1584 #endif
1585
1586 tlb_flush_jmp_cache(env, addr);
1587
1588 #if !defined(CONFIG_SOFTMMU)
1589 if (addr < MMAP_AREA_END)
1590 munmap((void *)addr, TARGET_PAGE_SIZE);
1591 #endif
1592 #ifdef USE_KQEMU
1593 if (env->kqemu_enabled) {
1594 kqemu_flush_page(env, addr);
1595 }
1596 #endif
1597 }
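/* Worked example of the TLB indexing above (a sketch, assuming 4 KB target
   pages and a 256-entry TLB, i.e. CPU_TLB_SIZE == 256): for
   addr = 0x00403000 the page number is 0x403, so the entry flushed in each
   MMU mode is index 0x403 & 0xff = 3. */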
1598
1599 /* update the TLBs so that writes to code in the virtual page 'addr'
1600 can be detected */
1601 static void tlb_protect_code(ram_addr_t ram_addr)
1602 {
1603 cpu_physical_memory_reset_dirty(ram_addr,
1604 ram_addr + TARGET_PAGE_SIZE,
1605 CODE_DIRTY_FLAG);
1606 }
1607
1608 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1609 tested for self modifying code */
1610 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1611 target_ulong vaddr)
1612 {
1613 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1614 }
1615
1616 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1617 unsigned long start, unsigned long length)
1618 {
1619 unsigned long addr;
1620 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1621 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1622 if ((addr - start) < length) {
1623 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1624 }
1625 }
1626 }
1627
1628 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1629 int dirty_flags)
1630 {
1631 CPUState *env;
1632 unsigned long length, start1;
1633 int i, mask, len;
1634 uint8_t *p;
1635
1636 start &= TARGET_PAGE_MASK;
1637 end = TARGET_PAGE_ALIGN(end);
1638
1639 length = end - start;
1640 if (length == 0)
1641 return;
1642 len = length >> TARGET_PAGE_BITS;
1643 #ifdef USE_KQEMU
1644 /* XXX: should not depend on cpu context */
1645 env = first_cpu;
1646 if (env->kqemu_enabled) {
1647 ram_addr_t addr;
1648 addr = start;
1649 for(i = 0; i < len; i++) {
1650 kqemu_set_notdirty(env, addr);
1651 addr += TARGET_PAGE_SIZE;
1652 }
1653 }
1654 #endif
1655 mask = ~dirty_flags;
1656 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1657 for(i = 0; i < len; i++)
1658 p[i] &= mask;
1659
1660 /* we modify the TLB cache so that the dirty bit will be set again
1661 when accessing the range */
1662 start1 = start + (unsigned long)phys_ram_base;
1663 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1664 for(i = 0; i < CPU_TLB_SIZE; i++)
1665 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1666 for(i = 0; i < CPU_TLB_SIZE; i++)
1667 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1668 #if (NB_MMU_MODES >= 3)
1669 for(i = 0; i < CPU_TLB_SIZE; i++)
1670 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1671 #if (NB_MMU_MODES == 4)
1672 for(i = 0; i < CPU_TLB_SIZE; i++)
1673 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1674 #endif
1675 #endif
1676 }
1677
1678 #if !defined(CONFIG_SOFTMMU)
1679 /* XXX: this is expensive */
1680 {
1681 VirtPageDesc *p;
1682 int j;
1683 target_ulong addr;
1684
1685 for(i = 0; i < L1_SIZE; i++) {
1686 p = l1_virt_map[i];
1687 if (p) {
1688 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1689 for(j = 0; j < L2_SIZE; j++) {
1690 if (p->valid_tag == virt_valid_tag &&
1691 p->phys_addr >= start && p->phys_addr < end &&
1692 (p->prot & PROT_WRITE)) {
1693 if (addr < MMAP_AREA_END) {
1694 mprotect((void *)addr, TARGET_PAGE_SIZE,
1695 p->prot & ~PROT_WRITE);
1696 }
1697 }
1698 addr += TARGET_PAGE_SIZE;
1699 p++;
1700 }
1701 }
1702 }
1703 }
1704 #endif
1705 }
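/* The dirty tracking scheme used above, in short: clearing a dirty flag
   also rewrites the matching TLB write entries to IO_MEM_NOTDIRTY, so the
   next guest store to such a page takes the slow I/O path, which marks the
   page dirty again and restores the fast RAM mapping (see tlb_set_dirty()
   below).  Callers such as the display code can thus clear a flag over a
   range and later ask which pages have been written in the meantime. */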
1706
1707 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1708 {
1709 ram_addr_t ram_addr;
1710
1711 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1712 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1713 tlb_entry->addend - (unsigned long)phys_ram_base;
1714 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1715 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1716 }
1717 }
1718 }
1719
1720 /* update the TLB according to the current state of the dirty bits */
1721 void cpu_tlb_update_dirty(CPUState *env)
1722 {
1723 int i;
1724 for(i = 0; i < CPU_TLB_SIZE; i++)
1725 tlb_update_dirty(&env->tlb_table[0][i]);
1726 for(i = 0; i < CPU_TLB_SIZE; i++)
1727 tlb_update_dirty(&env->tlb_table[1][i]);
1728 #if (NB_MMU_MODES >= 3)
1729 for(i = 0; i < CPU_TLB_SIZE; i++)
1730 tlb_update_dirty(&env->tlb_table[2][i]);
1731 #if (NB_MMU_MODES == 4)
1732 for(i = 0; i < CPU_TLB_SIZE; i++)
1733 tlb_update_dirty(&env->tlb_table[3][i]);
1734 #endif
1735 #endif
1736 }
1737
1738 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1739 unsigned long start)
1740 {
1741 unsigned long addr;
1742 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1743 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1744 if (addr == start) {
1745 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1746 }
1747 }
1748 }
1749
1750 /* update the TLB corresponding to virtual page vaddr and phys addr
1751 addr so that it is no longer dirty */
1752 static inline void tlb_set_dirty(CPUState *env,
1753 unsigned long addr, target_ulong vaddr)
1754 {
1755 int i;
1756
1757 addr &= TARGET_PAGE_MASK;
1758 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1759 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1760 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1761 #if (NB_MMU_MODES >= 3)
1762 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1763 #if (NB_MMU_MODES == 4)
1764 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1765 #endif
1766 #endif
1767 }
1768
1769 /* add a new TLB entry. At most one entry for a given virtual address
1770 is permitted. Return 0 if OK or 2 if the page could not be mapped
1771 (can only happen in non SOFTMMU mode for I/O pages or pages
1772 conflicting with the host address space). */
1773 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1774 target_phys_addr_t paddr, int prot,
1775 int mmu_idx, int is_softmmu)
1776 {
1777 PhysPageDesc *p;
1778 unsigned long pd;
1779 unsigned int index;
1780 target_ulong address;
1781 target_phys_addr_t addend;
1782 int ret;
1783 CPUTLBEntry *te;
1784 int i;
1785
1786 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1787 if (!p) {
1788 pd = IO_MEM_UNASSIGNED;
1789 } else {
1790 pd = p->phys_offset;
1791 }
1792 #if defined(DEBUG_TLB)
1793 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1794 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1795 #endif
1796
1797 ret = 0;
1798 #if !defined(CONFIG_SOFTMMU)
1799 if (is_softmmu)
1800 #endif
1801 {
1802 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1803 /* IO memory case */
1804 address = vaddr | pd;
1805 addend = paddr;
1806 } else {
1807 /* standard memory */
1808 address = vaddr;
1809 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1810 }
1811
1812 /* Make accesses to pages with watchpoints go via the
1813 watchpoint trap routines. */
1814 for (i = 0; i < env->nb_watchpoints; i++) {
1815 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1816 if (address & ~TARGET_PAGE_MASK) {
1817 env->watchpoint[i].addend = 0;
1818 address = vaddr | io_mem_watch;
1819 } else {
1820 env->watchpoint[i].addend = pd - paddr +
1821 (unsigned long) phys_ram_base;
1822 /* TODO: Figure out how to make read watchpoints coexist
1823 with code. */
1824 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1825 }
1826 }
1827 }
1828
1829 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1830 addend -= vaddr;
1831 te = &env->tlb_table[mmu_idx][index];
1832 te->addend = addend;
1833 if (prot & PAGE_READ) {
1834 te->addr_read = address;
1835 } else {
1836 te->addr_read = -1;
1837 }
1838
1839 if (prot & PAGE_EXEC) {
1840 te->addr_code = address;
1841 } else {
1842 te->addr_code = -1;
1843 }
1844 if (prot & PAGE_WRITE) {
1845 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1846 (pd & IO_MEM_ROMD)) {
1847 /* write access calls the I/O callback */
1848 te->addr_write = vaddr |
1849 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1850 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1851 !cpu_physical_memory_is_dirty(pd)) {
1852 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1853 } else {
1854 te->addr_write = address;
1855 }
1856 } else {
1857 te->addr_write = -1;
1858 }
1859 }
1860 #if !defined(CONFIG_SOFTMMU)
1861 else {
1862 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1863 /* IO access: no mapping is done as it will be handled by the
1864 soft MMU */
1865 if (!(env->hflags & HF_SOFTMMU_MASK))
1866 ret = 2;
1867 } else {
1868 void *map_addr;
1869
1870 if (vaddr >= MMAP_AREA_END) {
1871 ret = 2;
1872 } else {
1873 if (prot & PROT_WRITE) {
1874 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1875 #if defined(TARGET_HAS_SMC) || 1
1876 first_tb ||
1877 #endif
1878 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1879 !cpu_physical_memory_is_dirty(pd))) {
1880 /* ROM: we behave as if code was inside */
1881 /* if code is present, we only map as read only and save the
1882 original mapping */
1883 VirtPageDesc *vp;
1884
1885 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1886 vp->phys_addr = pd;
1887 vp->prot = prot;
1888 vp->valid_tag = virt_valid_tag;
1889 prot &= ~PAGE_WRITE;
1890 }
1891 }
1892 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1893 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1894 if (map_addr == MAP_FAILED) {
1895 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1896 paddr, vaddr);
1897 }
1898 }
1899 }
1900 }
1901 #endif
1902 return ret;
1903 }
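#if 0
/* Illustrative use (a sketch; 'env', 'vaddr', 'paddr', 'prot' and
   'mmu_idx' are assumed to be the results of a guest page table walk):
   a target's MMU fault handler installs the mapping it has resolved. */
tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                  paddr & TARGET_PAGE_MASK,
                  prot, mmu_idx, 1);
#endif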
1904
1905 /* called from signal handler: invalidate the code and unprotect the
1906 page. Return TRUE if the fault was successfully handled. */
1907 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1908 {
1909 #if !defined(CONFIG_SOFTMMU)
1910 VirtPageDesc *vp;
1911
1912 #if defined(DEBUG_TLB)
1913 printf("page_unprotect: addr=0x%08x\n", addr);
1914 #endif
1915 addr &= TARGET_PAGE_MASK;
1916
1917 /* if it is not mapped, no need to worry here */
1918 if (addr >= MMAP_AREA_END)
1919 return 0;
1920 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1921 if (!vp)
1922 return 0;
1923 /* NOTE: in this case, validate_tag is _not_ tested as it
1924 validates only the code TLB */
1925 if (vp->valid_tag != virt_valid_tag)
1926 return 0;
1927 if (!(vp->prot & PAGE_WRITE))
1928 return 0;
1929 #if defined(DEBUG_TLB)
1930 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1931 addr, vp->phys_addr, vp->prot);
1932 #endif
1933 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1934 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1935 (unsigned long)addr, vp->prot);
1936 /* set the dirty bit */
1937 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1938 /* flush the code inside */
1939 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1940 return 1;
1941 #else
1942 return 0;
1943 #endif
1944 }
1945
1946 #else
1947
1948 void tlb_flush(CPUState *env, int flush_global)
1949 {
1950 }
1951
1952 void tlb_flush_page(CPUState *env, target_ulong addr)
1953 {
1954 }
1955
1956 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1957 target_phys_addr_t paddr, int prot,
1958 int mmu_idx, int is_softmmu)
1959 {
1960 return 0;
1961 }
1962
1963 /* dump memory mappings */
1964 void page_dump(FILE *f)
1965 {
1966 unsigned long start, end;
1967 int i, j, prot, prot1;
1968 PageDesc *p;
1969
1970 fprintf(f, "%-8s %-8s %-8s %s\n",
1971 "start", "end", "size", "prot");
1972 start = -1;
1973 end = -1;
1974 prot = 0;
1975 for(i = 0; i <= L1_SIZE; i++) {
1976 if (i < L1_SIZE)
1977 p = l1_map[i];
1978 else
1979 p = NULL;
1980 for(j = 0;j < L2_SIZE; j++) {
1981 if (!p)
1982 prot1 = 0;
1983 else
1984 prot1 = p[j].flags;
1985 if (prot1 != prot) {
1986 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1987 if (start != -1) {
1988 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1989 start, end, end - start,
1990 prot & PAGE_READ ? 'r' : '-',
1991 prot & PAGE_WRITE ? 'w' : '-',
1992 prot & PAGE_EXEC ? 'x' : '-');
1993 }
1994 if (prot1 != 0)
1995 start = end;
1996 else
1997 start = -1;
1998 prot = prot1;
1999 }
2000 if (!p)
2001 break;
2002 }
2003 }
2004 }
2005
2006 int page_get_flags(target_ulong address)
2007 {
2008 PageDesc *p;
2009
2010 p = page_find(address >> TARGET_PAGE_BITS);
2011 if (!p)
2012 return 0;
2013 return p->flags;
2014 }
2015
2016 /* modify the flags of a page and invalidate the code if
2017 necessary. The flag PAGE_WRITE_ORG is set automatically
2018 depending on PAGE_WRITE */
2019 void page_set_flags(target_ulong start, target_ulong end, int flags)
2020 {
2021 PageDesc *p;
2022 target_ulong addr;
2023
2024 /* mmap_lock should already be held. */
2025 start = start & TARGET_PAGE_MASK;
2026 end = TARGET_PAGE_ALIGN(end);
2027 if (flags & PAGE_WRITE)
2028 flags |= PAGE_WRITE_ORG;
2029 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2030 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2031 /* if write access is being enabled and the page contains
2032 translated code, we invalidate it */
2033 if (!(p->flags & PAGE_WRITE) &&
2034 (flags & PAGE_WRITE) &&
2035 p->first_tb) {
2036 tb_invalidate_phys_page(addr, 0, NULL);
2037 }
2038 p->flags = flags;
2039 }
2040 }
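/* Illustrative sketch (not part of the original file): a user-mode mmap()
   emulation would typically record the protection of a freshly mapped region
   with page_set_flags().  The helper name and parameters below are
   hypothetical; PAGE_VALID marks the pages as mapped so that
   page_check_range() and cpu_physical_memory_rw() accept them. */
#if 0
static void example_record_mapping(target_ulong start, target_ulong len,
                                   int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;   /* PAGE_WRITE_ORG is added automatically */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}
#endif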
2041
2042 int page_check_range(target_ulong start, target_ulong len, int flags)
2043 {
2044 PageDesc *p;
2045 target_ulong end;
2046 target_ulong addr;
2047
2048 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2049 start = start & TARGET_PAGE_MASK;
2050
2051 if( end < start )
2052 /* we've wrapped around */
2053 return -1;
2054 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2055 p = page_find(addr >> TARGET_PAGE_BITS);
2056 if( !p )
2057 return -1;
2058 if( !(p->flags & PAGE_VALID) )
2059 return -1;
2060
2061 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2062 return -1;
2063 if (flags & PAGE_WRITE) {
2064 if (!(p->flags & PAGE_WRITE_ORG))
2065 return -1;
2066 /* unprotect the page if it was put read-only because it
2067 contains translated code */
2068 if (!(p->flags & PAGE_WRITE)) {
2069 if (!page_unprotect(addr, 0, NULL))
2070 return -1;
2071 }
2072 return 0;
2073 }
2074 }
2075 return 0;
2076 }
2077
2078 /* called from signal handler: invalidate the code and unprotect the
2079 page. Return TRUE if the fault was successfully handled. */
2080 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2081 {
2082 unsigned int page_index, prot, pindex;
2083 PageDesc *p, *p1;
2084 target_ulong host_start, host_end, addr;
2085
2086 /* Technically this isn't safe inside a signal handler. However we
2087 know this only ever happens in a synchronous SEGV handler, so in
2088 practice it seems to be ok. */
2089 mmap_lock();
2090
2091 host_start = address & qemu_host_page_mask;
2092 page_index = host_start >> TARGET_PAGE_BITS;
2093 p1 = page_find(page_index);
2094 if (!p1) {
2095 mmap_unlock();
2096 return 0;
2097 }
2098 host_end = host_start + qemu_host_page_size;
2099 p = p1;
2100 prot = 0;
2101 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2102 prot |= p->flags;
2103 p++;
2104 }
2105 /* if the page was really writable, then we change its
2106 protection back to writable */
2107 if (prot & PAGE_WRITE_ORG) {
2108 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2109 if (!(p1[pindex].flags & PAGE_WRITE)) {
2110 mprotect((void *)g2h(host_start), qemu_host_page_size,
2111 (prot & PAGE_BITS) | PAGE_WRITE);
2112 p1[pindex].flags |= PAGE_WRITE;
2113 /* and since the content will be modified, we must invalidate
2114 the corresponding translated code. */
2115 tb_invalidate_phys_page(address, pc, puc);
2116 #ifdef DEBUG_TB_CHECK
2117 tb_invalidate_check(address);
2118 #endif
2119 mmap_unlock();
2120 return 1;
2121 }
2122 }
2123 mmap_unlock();
2124 return 0;
2125 }
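/* Minimal sketch (assumption, not part of the original file): the per-host
   SIGSEGV handling in the user-mode emulator essentially does the following
   with the faulting address before deciding that a fault is genuine.  The
   function and parameter names are illustrative only. */
#if 0
static int example_handle_write_fault(unsigned long fault_addr,
                                      unsigned long host_pc, void *puc)
{
    /* returns 1 if the fault was caused by our own write protection of a
       page that contains translated code, and has now been fixed up */
    return page_unprotect((target_ulong)fault_addr, host_pc, puc);
}
#endif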
2126
2127 static inline void tlb_set_dirty(CPUState *env,
2128 unsigned long addr, target_ulong vaddr)
2129 {
2130 }
2131 #endif /* defined(CONFIG_USER_ONLY) */
2132
2133 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2134 ram_addr_t memory);
2135 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2136 ram_addr_t orig_memory);
2137 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2138 need_subpage) \
2139 do { \
2140 if (addr > start_addr) \
2141 start_addr2 = 0; \
2142 else { \
2143 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2144 if (start_addr2 > 0) \
2145 need_subpage = 1; \
2146 } \
2147 \
2148 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2149 end_addr2 = TARGET_PAGE_SIZE - 1; \
2150 else { \
2151 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2152 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2153 need_subpage = 1; \
2154 } \
2155 } while (0)
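/* Worked example (illustrative, assuming 4 KB target pages): registering a
   region with start_addr = 0x1100 and orig_size = 0x200.  On the first loop
   iteration addr == start_addr == 0x1100, so
       start_addr2 = 0x1100 & ~TARGET_PAGE_MASK = 0x100   (> 0, need_subpage)
       end_addr2   = (0x1100 + 0x200 - 1) & ~TARGET_PAGE_MASK = 0x2ff
   i.e. only offsets 0x100..0x2ff of that page are covered, so a subpage is
   needed.  A page fully covered by the region yields start_addr2 = 0 and
   end_addr2 = TARGET_PAGE_SIZE - 1, and need_subpage stays 0. */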
2156
2157 /* register physical memory. 'size' must be a multiple of the target
2158 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2159 I/O memory page */
2160 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2161 ram_addr_t size,
2162 ram_addr_t phys_offset)
2163 {
2164 target_phys_addr_t addr, end_addr;
2165 PhysPageDesc *p;
2166 CPUState *env;
2167 ram_addr_t orig_size = size;
2168 void *subpage;
2169
2170 #ifdef USE_KQEMU
2171 /* XXX: should not depend on cpu context */
2172 env = first_cpu;
2173 if (env->kqemu_enabled) {
2174 kqemu_set_phys_mem(start_addr, size, phys_offset);
2175 }
2176 #endif
2177 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2178 end_addr = start_addr + (target_phys_addr_t)size;
2179 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2181 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2182 ram_addr_t orig_memory = p->phys_offset;
2183 target_phys_addr_t start_addr2, end_addr2;
2184 int need_subpage = 0;
2185
2186 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2187 need_subpage);
2188 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2189 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2190 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2191 &p->phys_offset, orig_memory);
2192 } else {
2193 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2194 >> IO_MEM_SHIFT];
2195 }
2196 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2197 } else {
2198 p->phys_offset = phys_offset;
2199 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2200 (phys_offset & IO_MEM_ROMD))
2201 phys_offset += TARGET_PAGE_SIZE;
2202 }
2203 } else {
2204 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2205 p->phys_offset = phys_offset;
2206 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2207 (phys_offset & IO_MEM_ROMD))
2208 phys_offset += TARGET_PAGE_SIZE;
2209 else {
2210 target_phys_addr_t start_addr2, end_addr2;
2211 int need_subpage = 0;
2212
2213 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2214 end_addr2, need_subpage);
2215
2216 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2217 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2218 &p->phys_offset, IO_MEM_UNASSIGNED);
2219 subpage_register(subpage, start_addr2, end_addr2,
2220 phys_offset);
2221 }
2222 }
2223 }
2224 }
2225
2226 /* since each CPU stores ram addresses in its TLB cache, we must
2227 reset the modified entries */
2228 /* XXX: slow ! */
2229 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2230 tlb_flush(env, 1);
2231 }
2232 }
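/* Illustrative sketch (not part of the original file): roughly how a board
   model uses the API above.  RAM is carved out of the global RAM block with
   qemu_ram_alloc() and registered with its ram_addr_t as phys_offset; a ROM
   uses the same offset or'ed with IO_MEM_ROM.  The addresses and sizes are
   hypothetical. */
#if 0
static void example_board_memory_init(void)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(64 * 1024);
    cpu_register_physical_memory(0xfff00000, 64 * 1024,
                                 rom_offset | IO_MEM_ROM);
}
#endif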
2233
2234 /* XXX: temporary until new memory mapping API */
2235 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2236 {
2237 PhysPageDesc *p;
2238
2239 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2240 if (!p)
2241 return IO_MEM_UNASSIGNED;
2242 return p->phys_offset;
2243 }
2244
2245 /* XXX: better than nothing */
2246 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2247 {
2248 ram_addr_t addr;
2249 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2250 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2251 (uint64_t)size, (uint64_t)phys_ram_size);
2252 abort();
2253 }
2254 addr = phys_ram_alloc_offset;
2255 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2256 return addr;
2257 }
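/* Example of the bump allocation above (illustrative, assuming 4 KB target
   pages): with phys_ram_alloc_offset == 0, qemu_ram_alloc(0x1234) returns 0
   and advances phys_ram_alloc_offset to TARGET_PAGE_ALIGN(0x1234) == 0x2000,
   so the next allocation starts on a page boundary. */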
2258
2259 void qemu_ram_free(ram_addr_t addr)
2260 {
2261 }
2262
2263 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2264 {
2265 #ifdef DEBUG_UNASSIGNED
2266 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2267 #endif
2268 #ifdef TARGET_SPARC
2269 do_unassigned_access(addr, 0, 0, 0);
2270 #elif defined(TARGET_CRIS)
2271 do_unassigned_access(addr, 0, 0, 0);
2272 #endif
2273 return 0;
2274 }
2275
2276 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2277 {
2278 #ifdef DEBUG_UNASSIGNED
2279 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2280 #endif
2281 #ifdef TARGET_SPARC
2282 do_unassigned_access(addr, 1, 0, 0);
2283 #elif defined(TARGET_CRIS)
2284 do_unassigned_access(addr, 1, 0, 0);
2285 #endif
2286 }
2287
2288 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2289 unassigned_mem_readb,
2290 unassigned_mem_readb,
2291 unassigned_mem_readb,
2292 };
2293
2294 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2295 unassigned_mem_writeb,
2296 unassigned_mem_writeb,
2297 unassigned_mem_writeb,
2298 };
2299
2300 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2301 {
2302 unsigned long ram_addr;
2303 int dirty_flags;
2304 ram_addr = addr - (unsigned long)phys_ram_base;
2305 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2306 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2307 #if !defined(CONFIG_USER_ONLY)
2308 tb_invalidate_phys_page_fast(ram_addr, 1);
2309 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2310 #endif
2311 }
2312 stb_p((uint8_t *)(long)addr, val);
2313 #ifdef USE_KQEMU
2314 if (cpu_single_env->kqemu_enabled &&
2315 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2316 kqemu_modify_page(cpu_single_env, ram_addr);
2317 #endif
2318 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2319 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2320 /* we remove the notdirty callback only if the code has been
2321 flushed */
2322 if (dirty_flags == 0xff)
2323 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2324 }
2325
2326 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2327 {
2328 unsigned long ram_addr;
2329 int dirty_flags;
2330 ram_addr = addr - (unsigned long)phys_ram_base;
2331 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2332 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2333 #if !defined(CONFIG_USER_ONLY)
2334 tb_invalidate_phys_page_fast(ram_addr, 2);
2335 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2336 #endif
2337 }
2338 stw_p((uint8_t *)(long)addr, val);
2339 #ifdef USE_KQEMU
2340 if (cpu_single_env->kqemu_enabled &&
2341 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2342 kqemu_modify_page(cpu_single_env, ram_addr);
2343 #endif
2344 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2345 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2346 /* we remove the notdirty callback only if the code has been
2347 flushed */
2348 if (dirty_flags == 0xff)
2349 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2350 }
2351
2352 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2353 {
2354 unsigned long ram_addr;
2355 int dirty_flags;
2356 ram_addr = addr - (unsigned long)phys_ram_base;
2357 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2358 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2359 #if !defined(CONFIG_USER_ONLY)
2360 tb_invalidate_phys_page_fast(ram_addr, 4);
2361 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2362 #endif
2363 }
2364 stl_p((uint8_t *)(long)addr, val);
2365 #ifdef USE_KQEMU
2366 if (cpu_single_env->kqemu_enabled &&
2367 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2368 kqemu_modify_page(cpu_single_env, ram_addr);
2369 #endif
2370 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2371 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2372 /* we remove the notdirty callback only if the code has been
2373 flushed */
2374 if (dirty_flags == 0xff)
2375 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2376 }
2377
2378 static CPUReadMemoryFunc *error_mem_read[3] = {
2379 NULL, /* never used */
2380 NULL, /* never used */
2381 NULL, /* never used */
2382 };
2383
2384 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2385 notdirty_mem_writeb,
2386 notdirty_mem_writew,
2387 notdirty_mem_writel,
2388 };
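/* The three handlers above share one pattern: if CODE_DIRTY_FLAG is clear,
   translated code may exist on the page, so it is invalidated first with
   tb_invalidate_phys_page_fast(); the store is then performed on the host
   page, every dirty bit except CODE_DIRTY_FLAG is set, and once the byte
   reaches 0xff the not-dirty TLB entry is dropped via tlb_set_dirty() so
   that later writes to the page take the fast path again. */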
2389
2390 #if defined(CONFIG_SOFTMMU)
2391 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2392 so these check for a hit then pass through to the normal out-of-line
2393 phys routines. */
2394 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2395 {
2396 return ldub_phys(addr);
2397 }
2398
2399 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2400 {
2401 return lduw_phys(addr);
2402 }
2403
2404 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2405 {
2406 return ldl_phys(addr);
2407 }
2408
2409 /* Generate a debug exception if a watchpoint has been hit.
2410 Returns the real physical address of the access. addr will be a host
2411 address in case of a RAM location. */
2412 static target_ulong check_watchpoint(target_phys_addr_t addr)
2413 {
2414 CPUState *env = cpu_single_env;
2415 target_ulong watch;
2416 target_ulong retaddr;
2417 int i;
2418
2419 retaddr = addr;
2420 for (i = 0; i < env->nb_watchpoints; i++) {
2421 watch = env->watchpoint[i].vaddr;
2422 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2423 retaddr = addr - env->watchpoint[i].addend;
2424 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2425 cpu_single_env->watchpoint_hit = i + 1;
2426 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2427 break;
2428 }
2429 }
2430 }
2431 return retaddr;
2432 }
2433
2434 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2435 uint32_t val)
2436 {
2437 addr = check_watchpoint(addr);
2438 stb_phys(addr, val);
2439 }
2440
2441 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2442 uint32_t val)
2443 {
2444 addr = check_watchpoint(addr);
2445 stw_phys(addr, val);
2446 }
2447
2448 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2449 uint32_t val)
2450 {
2451 addr = check_watchpoint(addr);
2452 stl_phys(addr, val);
2453 }
2454
2455 static CPUReadMemoryFunc *watch_mem_read[3] = {
2456 watch_mem_readb,
2457 watch_mem_readw,
2458 watch_mem_readl,
2459 };
2460
2461 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2462 watch_mem_writeb,
2463 watch_mem_writew,
2464 watch_mem_writel,
2465 };
2466 #endif
2467
2468 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2469 unsigned int len)
2470 {
2471 uint32_t ret;
2472 unsigned int idx;
2473
2474 idx = SUBPAGE_IDX(addr - mmio->base);
2475 #if defined(DEBUG_SUBPAGE)
2476 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2477 mmio, len, addr, idx);
2478 #endif
2479 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2480
2481 return ret;
2482 }
2483
2484 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2485 uint32_t value, unsigned int len)
2486 {
2487 unsigned int idx;
2488
2489 idx = SUBPAGE_IDX(addr - mmio->base);
2490 #if defined(DEBUG_SUBPAGE)
2491 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2492 mmio, len, addr, idx, value);
2493 #endif
2494 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2495 }
2496
2497 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2498 {
2499 #if defined(DEBUG_SUBPAGE)
2500 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2501 #endif
2502
2503 return subpage_readlen(opaque, addr, 0);
2504 }
2505
2506 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2507 uint32_t value)
2508 {
2509 #if defined(DEBUG_SUBPAGE)
2510 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2511 #endif
2512 subpage_writelen(opaque, addr, value, 0);
2513 }
2514
2515 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2516 {
2517 #if defined(DEBUG_SUBPAGE)
2518 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2519 #endif
2520
2521 return subpage_readlen(opaque, addr, 1);
2522 }
2523
2524 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2525 uint32_t value)
2526 {
2527 #if defined(DEBUG_SUBPAGE)
2528 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2529 #endif
2530 subpage_writelen(opaque, addr, value, 1);
2531 }
2532
2533 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2534 {
2535 #if defined(DEBUG_SUBPAGE)
2536 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2537 #endif
2538
2539 return subpage_readlen(opaque, addr, 2);
2540 }
2541
2542 static void subpage_writel (void *opaque,
2543 target_phys_addr_t addr, uint32_t value)
2544 {
2545 #if defined(DEBUG_SUBPAGE)
2546 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2547 #endif
2548 subpage_writelen(opaque, addr, value, 2);
2549 }
2550
2551 static CPUReadMemoryFunc *subpage_read[] = {
2552 &subpage_readb,
2553 &subpage_readw,
2554 &subpage_readl,
2555 };
2556
2557 static CPUWriteMemoryFunc *subpage_write[] = {
2558 &subpage_writeb,
2559 &subpage_writew,
2560 &subpage_writel,
2561 };
2562
2563 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2564 ram_addr_t memory)
2565 {
2566 int idx, eidx;
2567 unsigned int i;
2568
2569 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2570 return -1;
2571 idx = SUBPAGE_IDX(start);
2572 eidx = SUBPAGE_IDX(end);
2573 #if defined(DEBUG_SUBPAGE)
2574 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2575 mmio, start, end, idx, eidx, memory);
2576 #endif
2577 memory >>= IO_MEM_SHIFT;
2578 for (; idx <= eidx; idx++) {
2579 for (i = 0; i < 4; i++) {
2580 if (io_mem_read[memory][i]) {
2581 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2582 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2583 }
2584 if (io_mem_write[memory][i]) {
2585 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2586 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2587 }
2588 }
2589 }
2590
2591 return 0;
2592 }
2593
2594 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2595 ram_addr_t orig_memory)
2596 {
2597 subpage_t *mmio;
2598 int subpage_memory;
2599
2600 mmio = qemu_mallocz(sizeof(subpage_t));
2601 if (mmio != NULL) {
2602 mmio->base = base;
2603 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2604 #if defined(DEBUG_SUBPAGE)
2605 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2606 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2607 #endif
2608 *phys = subpage_memory | IO_MEM_SUBPAGE;
2609 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2610 }
2611
2612 return mmio;
2613 }
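/* Summary of the dispatch path set up above: a physical page whose
   phys_offset has IO_MEM_SUBPAGE set routes every access through the
   subpage_read/subpage_write handlers, which use SUBPAGE_IDX(addr - base)
   to select the per-offset callback tables filled in by subpage_register().
   subpage_init() pre-registers the whole page with the original memory, so
   offsets not covered by a later subpage_register() keep their previous
   behaviour. */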
2614
2615 static void io_mem_init(void)
2616 {
2617 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2618 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2619 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2620 io_mem_nb = 5;
2621
2622 #if defined(CONFIG_SOFTMMU)
2623 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2624 watch_mem_write, NULL);
2625 #endif
2626 /* alloc dirty bits array */
2627 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2628 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2629 }
2630
2631 /* mem_read and mem_write are arrays of functions containing the
2632 function to access byte (index 0), word (index 1) and dword (index
2633 2). Functions can be omitted with a NULL function pointer. The
2634 registered functions may be modified dynamically later.
2635 If io_index is positive, the corresponding I/O zone is
2636 modified. If it is zero or negative, a new I/O zone is allocated. The
2637 return value can be used with cpu_register_physical_memory(); (-1) is
2638 returned on error. */
2639 int cpu_register_io_memory(int io_index,
2640 CPUReadMemoryFunc **mem_read,
2641 CPUWriteMemoryFunc **mem_write,
2642 void *opaque)
2643 {
2644 int i, subwidth = 0;
2645
2646 if (io_index <= 0) {
2647 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2648 return -1;
2649 io_index = io_mem_nb++;
2650 } else {
2651 if (io_index >= IO_MEM_NB_ENTRIES)
2652 return -1;
2653 }
2654
2655 for(i = 0;i < 3; i++) {
2656 if (!mem_read[i] || !mem_write[i])
2657 subwidth = IO_MEM_SUBWIDTH;
2658 io_mem_read[io_index][i] = mem_read[i];
2659 io_mem_write[io_index][i] = mem_write[i];
2660 }
2661 io_mem_opaque[io_index] = opaque;
2662 return (io_index << IO_MEM_SHIFT) | subwidth;
2663 }
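/* Illustrative sketch (not part of the original file): a device model would
   typically register its callbacks and then map the returned token with
   cpu_register_physical_memory().  The device callbacks, names and base
   address below are hypothetical. */
#if 0
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                      /* device register read */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};
static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_dev_init(target_phys_addr_t base, void *opaque)
{
    int io;

    io = cpu_register_io_memory(0, example_dev_readfn, example_dev_writefn,
                                opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif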
2664
2665 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2666 {
2667 return io_mem_write[io_index >> IO_MEM_SHIFT];
2668 }
2669
2670 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2671 {
2672 return io_mem_read[io_index >> IO_MEM_SHIFT];
2673 }
2674
2675 /* physical memory access (slow version, mainly for debug) */
2676 #if defined(CONFIG_USER_ONLY)
2677 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2678 int len, int is_write)
2679 {
2680 int l, flags;
2681 target_ulong page;
2682 void * p;
2683
2684 while (len > 0) {
2685 page = addr & TARGET_PAGE_MASK;
2686 l = (page + TARGET_PAGE_SIZE) - addr;
2687 if (l > len)
2688 l = len;
2689 flags = page_get_flags(page);
2690 if (!(flags & PAGE_VALID))
2691 return;
2692 if (is_write) {
2693 if (!(flags & PAGE_WRITE))
2694 return;
2695 /* XXX: this code should not depend on lock_user */
2696 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2697 /* FIXME - should this return an error rather than just fail? */
2698 return;
2699 memcpy(p, buf, l);
2700 unlock_user(p, addr, l);
2701 } else {
2702 if (!(flags & PAGE_READ))
2703 return;
2704 /* XXX: this code should not depend on lock_user */
2705 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2706 /* FIXME - should this return an error rather than just fail? */
2707 return;
2708 memcpy(buf, p, l);
2709 unlock_user(p, addr, 0);
2710 }
2711 len -= l;
2712 buf += l;
2713 addr += l;
2714 }
2715 }
2716
2717 #else
2718 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2719 int len, int is_write)
2720 {
2721 int l, io_index;
2722 uint8_t *ptr;
2723 uint32_t val;
2724 target_phys_addr_t page;
2725 unsigned long pd;
2726 PhysPageDesc *p;
2727
2728 while (len > 0) {
2729 page = addr & TARGET_PAGE_MASK;
2730 l = (page + TARGET_PAGE_SIZE) - addr;
2731 if (l > len)
2732 l = len;
2733 p = phys_page_find(page >> TARGET_PAGE_BITS);
2734 if (!p) {
2735 pd = IO_MEM_UNASSIGNED;
2736 } else {
2737 pd = p->phys_offset;
2738 }
2739
2740 if (is_write) {
2741 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2743 /* XXX: could force cpu_single_env to NULL to avoid
2744 potential bugs */
2745 if (l >= 4 && ((addr & 3) == 0)) {
2746 /* 32 bit write access */
2747 val = ldl_p(buf);
2748 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2749 l = 4;
2750 } else if (l >= 2 && ((addr & 1) == 0)) {
2751 /* 16 bit write access */
2752 val = lduw_p(buf);
2753 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2754 l = 2;
2755 } else {
2756 /* 8 bit write access */
2757 val = ldub_p(buf);
2758 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2759 l = 1;
2760 }
2761 } else {
2762 unsigned long addr1;
2763 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2764 /* RAM case */
2765 ptr = phys_ram_base + addr1;
2766 memcpy(ptr, buf, l);
2767 if (!cpu_physical_memory_is_dirty(addr1)) {
2768 /* invalidate code */
2769 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2770 /* set dirty bit */
2771 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2772 (0xff & ~CODE_DIRTY_FLAG);
2773 }
2774 }
2775 } else {
2776 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2777 !(pd & IO_MEM_ROMD)) {
2778 /* I/O case */
2779 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2780 if (l >= 4 && ((addr & 3) == 0)) {
2781 /* 32 bit read access */
2782 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2783 stl_p(buf, val);
2784 l = 4;
2785 } else if (l >= 2 && ((addr & 1) == 0)) {
2786 /* 16 bit read access */
2787 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2788 stw_p(buf, val);
2789 l = 2;
2790 } else {
2791 /* 8 bit read access */
2792 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2793 stb_p(buf, val);
2794 l = 1;
2795 }
2796 } else {
2797 /* RAM case */
2798 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2799 (addr & ~TARGET_PAGE_MASK);
2800 memcpy(buf, ptr, l);
2801 }
2802 }
2803 len -= l;
2804 buf += l;
2805 addr += l;
2806 }
2807 }
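/* Illustrative sketch (not part of the original file): DMA-style device code
   usually goes through the cpu_physical_memory_read/write wrappers built on
   cpu_physical_memory_rw(), as the load/store helpers below do.  The helper
   name and chunk size are hypothetical. */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[256];

    while (len > 0) {
        int l = len < (int)sizeof(buf) ? len : (int)sizeof(buf);
        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif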
2808
2809 /* used for ROM loading: can write to RAM and ROM */
2810 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2811 const uint8_t *buf, int len)
2812 {
2813 int l;
2814 uint8_t *ptr;
2815 target_phys_addr_t page;
2816 unsigned long pd;
2817 PhysPageDesc *p;
2818
2819 while (len > 0) {
2820 page = addr & TARGET_PAGE_MASK;
2821 l = (page + TARGET_PAGE_SIZE) - addr;
2822 if (l > len)
2823 l = len;
2824 p = phys_page_find(page >> TARGET_PAGE_BITS);
2825 if (!p) {
2826 pd = IO_MEM_UNASSIGNED;
2827 } else {
2828 pd = p->phys_offset;
2829 }
2830
2831 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2832 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2833 !(pd & IO_MEM_ROMD)) {
2834 /* do nothing */
2835 } else {
2836 unsigned long addr1;
2837 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2838 /* ROM/RAM case */
2839 ptr = phys_ram_base + addr1;
2840 memcpy(ptr, buf, l);
2841 }
2842 len -= l;
2843 buf += l;
2844 addr += l;
2845 }
2846 }
2847
2848
2849 /* warning: addr must be aligned */
2850 uint32_t ldl_phys(target_phys_addr_t addr)
2851 {
2852 int io_index;
2853 uint8_t *ptr;
2854 uint32_t val;
2855 unsigned long pd;
2856 PhysPageDesc *p;
2857
2858 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2859 if (!p) {
2860 pd = IO_MEM_UNASSIGNED;
2861 } else {
2862 pd = p->phys_offset;
2863 }
2864
2865 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2866 !(pd & IO_MEM_ROMD)) {
2867 /* I/O case */
2868 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2869 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2870 } else {
2871 /* RAM case */
2872 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2873 (addr & ~TARGET_PAGE_MASK);
2874 val = ldl_p(ptr);
2875 }
2876 return val;
2877 }
2878
2879 /* warning: addr must be aligned */
2880 uint64_t ldq_phys(target_phys_addr_t addr)
2881 {
2882 int io_index;
2883 uint8_t *ptr;
2884 uint64_t val;
2885 unsigned long pd;
2886 PhysPageDesc *p;
2887
2888 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2889 if (!p) {
2890 pd = IO_MEM_UNASSIGNED;
2891 } else {
2892 pd = p->phys_offset;
2893 }
2894
2895 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2896 !(pd & IO_MEM_ROMD)) {
2897 /* I/O case */
2898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2899 #ifdef TARGET_WORDS_BIGENDIAN
2900 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2901 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2902 #else
2903 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2904 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2905 #endif
2906 } else {
2907 /* RAM case */
2908 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2909 (addr & ~TARGET_PAGE_MASK);
2910 val = ldq_p(ptr);
2911 }
2912 return val;
2913 }
2914
2915 /* XXX: optimize */
2916 uint32_t ldub_phys(target_phys_addr_t addr)
2917 {
2918 uint8_t val;
2919 cpu_physical_memory_read(addr, &val, 1);
2920 return val;
2921 }
2922
2923 /* XXX: optimize */
2924 uint32_t lduw_phys(target_phys_addr_t addr)
2925 {
2926 uint16_t val;
2927 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2928 return tswap16(val);
2929 }
2930
2931 /* warning: addr must be aligned. The RAM page is not marked as dirty
2932 and the code inside is not invalidated. This is useful if the dirty
2933 bits are used to track modified PTEs */
2934 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2935 {
2936 int io_index;
2937 uint8_t *ptr;
2938 unsigned long pd;
2939 PhysPageDesc *p;
2940
2941 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2942 if (!p) {
2943 pd = IO_MEM_UNASSIGNED;
2944 } else {
2945 pd = p->phys_offset;
2946 }
2947
2948 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2950 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2951 } else {
2952 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2953 (addr & ~TARGET_PAGE_MASK);
2954 stl_p(ptr, val);
2955 }
2956 }
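/* Illustrative sketch (assumption, not part of the original file): a target
   MMU helper that sets the accessed bit of a guest page-table entry would
   use the _notdirty variant so this bookkeeping write does not itself mark
   the PTE page dirty, since the dirty bits of that page may be used to
   detect guest modifications of the PTE.  The i386-style PG_ACCESSED_MASK
   constant is assumed here for the example. */
#if 0
    pte = ldl_phys(pte_addr);
    if (!(pte & PG_ACCESSED_MASK)) {
        pte |= PG_ACCESSED_MASK;
        stl_phys_notdirty(pte_addr, pte);
    }
#endif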
2957
2958 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2959 {
2960 int io_index;
2961 uint8_t *ptr;
2962 unsigned long pd;
2963 PhysPageDesc *p;
2964
2965 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2966 if (!p) {
2967 pd = IO_MEM_UNASSIGNED;
2968 } else {
2969 pd = p->phys_offset;
2970 }
2971
2972 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2973 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2974 #ifdef TARGET_WORDS_BIGENDIAN
2975 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2976 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2977 #else
2978 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2979 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2980 #endif
2981 } else {
2982 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2983 (addr & ~TARGET_PAGE_MASK);
2984 stq_p(ptr, val);
2985 }
2986 }
2987
2988 /* warning: addr must be aligned */
2989 void stl_phys(target_phys_addr_t addr, uint32_t val)
2990 {
2991 int io_index;
2992 uint8_t *ptr;
2993 unsigned long pd;
2994 PhysPageDesc *p;
2995
2996 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2997 if (!p) {
2998 pd = IO_MEM_UNASSIGNED;
2999 } else {
3000 pd = p->phys_offset;
3001 }
3002
3003 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3004 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3005 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3006 } else {
3007 unsigned long addr1;
3008 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3009 /* RAM case */
3010 ptr = phys_ram_base + addr1;
3011 stl_p(ptr, val);
3012 if (!cpu_physical_memory_is_dirty(addr1)) {
3013 /* invalidate code */
3014 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3015 /* set dirty bit */
3016 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3017 (0xff & ~CODE_DIRTY_FLAG);
3018 }
3019 }
3020 }
3021
3022 /* XXX: optimize */
3023 void stb_phys(target_phys_addr_t addr, uint32_t val)
3024 {
3025 uint8_t v = val;
3026 cpu_physical_memory_write(addr, &v, 1);
3027 }
3028
3029 /* XXX: optimize */
3030 void stw_phys(target_phys_addr_t addr, uint32_t val)
3031 {
3032 uint16_t v = tswap16(val);
3033 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3034 }
3035
3036 /* XXX: optimize */
3037 void stq_phys(target_phys_addr_t addr, uint64_t val)
3038 {
3039 val = tswap64(val);
3040 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3041 }
3042
3043 #endif
3044
3045 /* virtual memory access for debug */
3046 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3047 uint8_t *buf, int len, int is_write)
3048 {
3049 int l;
3050 target_phys_addr_t phys_addr;
3051 target_ulong page;
3052
3053 while (len > 0) {
3054 page = addr & TARGET_PAGE_MASK;
3055 phys_addr = cpu_get_phys_page_debug(env, page);
3056 /* if no physical page mapped, return an error */
3057 if (phys_addr == -1)
3058 return -1;
3059 l = (page + TARGET_PAGE_SIZE) - addr;
3060 if (l > len)
3061 l = len;
3062 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3063 buf, l, is_write);
3064 len -= l;
3065 buf += l;
3066 addr += l;
3067 }
3068 return 0;
3069 }
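/* Illustrative fragment (not part of the original file): this is the kind of
   call the gdb stub makes to read guest virtual memory; a negative return
   means some page in the range was not mapped.  'env' and 'vaddr' are
   assumed to be in scope. */
#if 0
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        /* no physical page mapped at part of the range */
    }
#endif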
3070
3071 void dump_exec_info(FILE *f,
3072 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3073 {
3074 int i, target_code_size, max_target_code_size;
3075 int direct_jmp_count, direct_jmp2_count, cross_page;
3076 TranslationBlock *tb;
3077
3078 target_code_size = 0;
3079 max_target_code_size = 0;
3080 cross_page = 0;
3081 direct_jmp_count = 0;
3082 direct_jmp2_count = 0;
3083 for(i = 0; i < nb_tbs; i++) {
3084 tb = &tbs[i];
3085 target_code_size += tb->size;
3086 if (tb->size > max_target_code_size)
3087 max_target_code_size = tb->size;
3088 if (tb->page_addr[1] != -1)
3089 cross_page++;
3090 if (tb->tb_next_offset[0] != 0xffff) {
3091 direct_jmp_count++;
3092 if (tb->tb_next_offset[1] != 0xffff) {
3093 direct_jmp2_count++;
3094 }
3095 }
3096 }
3097 /* XXX: avoid using doubles ? */
3098 cpu_fprintf(f, "Translation buffer state:\n");
3099 cpu_fprintf(f, "gen code size %ld/%ld\n",
3100 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3101 cpu_fprintf(f, "TB count %d/%d\n",
3102 nb_tbs, code_gen_max_blocks);
3103 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3104 nb_tbs ? target_code_size / nb_tbs : 0,
3105 max_target_code_size);
3106 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3107 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3108 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3109 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3110 cross_page,
3111 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3112 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3113 direct_jmp_count,
3114 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3115 direct_jmp2_count,
3116 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3117 cpu_fprintf(f, "\nStatistics:\n");
3118 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3119 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3120 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3121 tcg_dump_info(f, cpu_fprintf);
3122 }
3123
3124 #if !defined(CONFIG_USER_ONLY)
3125
3126 #define MMUSUFFIX _cmmu
3127 #define GETPC() NULL
3128 #define env cpu_single_env
3129 #define SOFTMMU_CODE_ACCESS
3130
3131 #define SHIFT 0
3132 #include "softmmu_template.h"
3133
3134 #define SHIFT 1
3135 #include "softmmu_template.h"
3136
3137 #define SHIFT 2
3138 #include "softmmu_template.h"
3139
3140 #define SHIFT 3
3141 #include "softmmu_template.h"
3142
3143 #undef env
3144
3145 #endif