1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
46
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
51
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
55
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
58
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
63
64 #define SMC_BITMAP_USE_THRESHOLD 10
65
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
68
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
86
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
105
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
112
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
121
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
133
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
145
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 } PhysPageDesc;
150
151 #define L2_BITS 10
152 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153 /* XXX: this is a temporary hack for the alpha target.
154 * In the future, this is to be replaced by a multi-level table
155 * to actually be able to handle the complete 64-bit address space.
156 */
157 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158 #else
159 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160 #endif
161
162 #define L1_SIZE (1 << L1_BITS)
163 #define L2_SIZE (1 << L2_BITS)
164
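/* A page index is split into an upper L1 part (L1_BITS) and a lower L2
   part (L2_BITS): l1_map[index >> L2_BITS] points to an array of L2_SIZE
   PageDesc entries and (index & (L2_SIZE - 1)) selects the entry inside
   it.  With a 4 KB target page and 32-bit virtual addresses this yields
   a 1024 x 1024 two-level table covering the whole address space. */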
165 unsigned long qemu_real_host_page_size;
166 unsigned long qemu_host_page_bits;
167 unsigned long qemu_host_page_size;
168 unsigned long qemu_host_page_mask;
169
170 /* XXX: for system emulation, it could just be an array */
171 static PageDesc *l1_map[L1_SIZE];
172 static PhysPageDesc **l1_phys_map;
173
174 #if !defined(CONFIG_USER_ONLY)
175 static void io_mem_init(void);
176
177 /* io memory support */
178 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181 static int io_mem_nb;
182 static int io_mem_watch;
183 #endif
184
185 /* log support */
186 static const char *logfilename = "/tmp/qemu.log";
187 FILE *logfile;
188 int loglevel;
189 static int log_append = 0;
190
191 /* statistics */
192 static int tlb_flush_count;
193 static int tb_flush_count;
194 static int tb_phys_invalidate_count;
195
196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197 typedef struct subpage_t {
198 target_phys_addr_t base;
199 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201 void *opaque[TARGET_PAGE_SIZE][2][4];
202 } subpage_t;
203
204 #ifdef _WIN32
205 static void map_exec(void *addr, long size)
206 {
207 DWORD old_protect;
208 VirtualProtect(addr, size,
209 PAGE_EXECUTE_READWRITE, &old_protect);
210
211 }
212 #else
213 static void map_exec(void *addr, long size)
214 {
215 unsigned long start, end, page_size;
216
217 page_size = getpagesize();
218 start = (unsigned long)addr;
219 start &= ~(page_size - 1);
220
221 end = (unsigned long)addr + size;
222 end += page_size - 1;
223 end &= ~(page_size - 1);
224
225 mprotect((void *)start, end - start,
226 PROT_READ | PROT_WRITE | PROT_EXEC);
227 }
228 #endif
229
230 static void page_init(void)
231 {
232 /* NOTE: we can always assume that qemu_host_page_size >=
233 TARGET_PAGE_SIZE */
234 #ifdef _WIN32
235 {
236 SYSTEM_INFO system_info;
237
238 GetSystemInfo(&system_info);
239 qemu_real_host_page_size = system_info.dwPageSize;
240 }
241 #else
242 qemu_real_host_page_size = getpagesize();
243 #endif
244 if (qemu_host_page_size == 0)
245 qemu_host_page_size = qemu_real_host_page_size;
246 if (qemu_host_page_size < TARGET_PAGE_SIZE)
247 qemu_host_page_size = TARGET_PAGE_SIZE;
248 qemu_host_page_bits = 0;
249 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250 qemu_host_page_bits++;
251 qemu_host_page_mask = ~(qemu_host_page_size - 1);
252 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254
255 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 {
257 long long startaddr, endaddr;
258 FILE *f;
259 int n;
260
261 mmap_lock();
262 last_brk = (unsigned long)sbrk(0);
263 f = fopen("/proc/self/maps", "r");
264 if (f) {
265 do {
266 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267 if (n == 2) {
268 startaddr = MIN(startaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 endaddr = MIN(endaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 page_set_flags(startaddr & TARGET_PAGE_MASK,
273 TARGET_PAGE_ALIGN(endaddr),
274 PAGE_RESERVED);
275 }
276 } while (!feof(f));
277 fclose(f);
278 }
279 mmap_unlock();
280 }
281 #endif
282 }
283
284 static inline PageDesc **page_l1_map(target_ulong index)
285 {
286 #if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290 return NULL;
291 #endif
292 return &l1_map[index >> L2_BITS];
293 }
294
295 static inline PageDesc *page_find_alloc(target_ulong index)
296 {
297 PageDesc **lp, *p;
298 lp = page_l1_map(index);
299 if (!lp)
300 return NULL;
301
302 p = *lp;
303 if (!p) {
304 /* allocate if not found */
305 #if defined(CONFIG_USER_ONLY)
306 unsigned long addr;
307 size_t len = sizeof(PageDesc) * L2_SIZE;
308 /* Don't use qemu_malloc because it may recurse. */
309 p = mmap(0, len, PROT_READ | PROT_WRITE,
310 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311 *lp = p;
312 addr = h2g(p);
313 if (addr == (target_ulong)addr) {
314 page_set_flags(addr & TARGET_PAGE_MASK,
315 TARGET_PAGE_ALIGN(addr + len),
316 PAGE_RESERVED);
317 }
318 #else
319 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320 *lp = p;
321 #endif
322 }
323 return p + (index & (L2_SIZE - 1));
324 }
325
326 static inline PageDesc *page_find(target_ulong index)
327 {
328 PageDesc **lp, *p;
329 lp = page_l1_map(index);
330 if (!lp)
331 return NULL;
332
333 p = *lp;
334 if (!p)
335 return 0;
336 return p + (index & (L2_SIZE - 1));
337 }
338
339 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340 {
341 void **lp, **p;
342 PhysPageDesc *pd;
343
344 p = (void **)l1_phys_map;
345 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
346
347 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349 #endif
350 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351 p = *lp;
352 if (!p) {
353 /* allocate if not found */
354 if (!alloc)
355 return NULL;
356 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357 memset(p, 0, sizeof(void *) * L1_SIZE);
358 *lp = p;
359 }
360 #endif
361 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362 pd = *lp;
363 if (!pd) {
364 int i;
365 /* allocate if not found */
366 if (!alloc)
367 return NULL;
368 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369 *lp = pd;
370 for (i = 0; i < L2_SIZE; i++)
371 pd[i].phys_offset = IO_MEM_UNASSIGNED;
372 }
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374 }
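/* Like l1_map, the physical page table is filled lazily: intermediate
   levels are allocated with qemu_vmalloc() on first use, and when
   TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra indirection level sits above
   the L1/L2 split, which is why the lookup above walks one more table in
   that configuration. */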
375
376 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377 {
378 return phys_page_find_alloc(index, 0);
379 }
380
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr);
383 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384 target_ulong vaddr);
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
387 #endif
388
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommended to allocate big chunks of data in
393 user mode. This will change when a dedicated libc is used. */
394 #define USE_STATIC_CODE_GEN_BUFFER
395 #endif
396
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399 #endif
400
401 static void code_gen_alloc(unsigned long tb_size)
402 {
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407 #else
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413 #else
414 /* XXX: needs adjustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416 #endif
417 }
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
423 {
424 int flags;
425 void *start = NULL;
426
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428 #if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439 #endif
440 code_gen_buffer = mmap(start, code_gen_buffer_size,
441 PROT_WRITE | PROT_READ | PROT_EXEC,
442 flags, -1, 0);
443 if (code_gen_buffer == MAP_FAILED) {
444 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445 exit(1);
446 }
447 }
448 #elif defined(__FreeBSD__)
449 {
450 int flags;
451 void *addr = NULL;
452 flags = MAP_PRIVATE | MAP_ANONYMOUS;
453 #if defined(__x86_64__)
454 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455 * 0x40000000 is free */
456 flags |= MAP_FIXED;
457 addr = (void *)0x40000000;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size > (800 * 1024 * 1024))
460 code_gen_buffer_size = (800 * 1024 * 1024);
461 #endif
462 code_gen_buffer = mmap(addr, code_gen_buffer_size,
463 PROT_WRITE | PROT_READ | PROT_EXEC,
464 flags, -1, 0);
465 if (code_gen_buffer == MAP_FAILED) {
466 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467 exit(1);
468 }
469 }
470 #else
471 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472 if (!code_gen_buffer) {
473 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474 exit(1);
475 }
476 map_exec(code_gen_buffer, code_gen_buffer_size);
477 #endif
478 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
479 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480 code_gen_buffer_max_size = code_gen_buffer_size -
481 code_gen_max_block_size();
482 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484 }
485
486 /* Must be called before using the QEMU cpus. 'tb_size' is the size
487 (in bytes) allocated to the translation buffer. Zero means default
488 size. */
489 void cpu_exec_init_all(unsigned long tb_size)
490 {
491 cpu_gen_init();
492 code_gen_alloc(tb_size);
493 code_gen_ptr = code_gen_buffer;
494 page_init();
495 #if !defined(CONFIG_USER_ONLY)
496 io_mem_init();
497 #endif
498 }
499
500 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501
502 #define CPU_COMMON_SAVE_VERSION 1
503
504 static void cpu_common_save(QEMUFile *f, void *opaque)
505 {
506 CPUState *env = opaque;
507
508 qemu_put_be32s(f, &env->halted);
509 qemu_put_be32s(f, &env->interrupt_request);
510 }
511
512 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513 {
514 CPUState *env = opaque;
515
516 if (version_id != CPU_COMMON_SAVE_VERSION)
517 return -EINVAL;
518
519 qemu_get_be32s(f, &env->halted);
520 qemu_get_be32s(f, &env->interrupt_request);
521 tlb_flush(env, 1);
522
523 return 0;
524 }
525 #endif
526
527 void cpu_exec_init(CPUState *env)
528 {
529 CPUState **penv;
530 int cpu_index;
531
532 env->next_cpu = NULL;
533 penv = &first_cpu;
534 cpu_index = 0;
535 while (*penv != NULL) {
536 penv = (CPUState **)&(*penv)->next_cpu;
537 cpu_index++;
538 }
539 env->cpu_index = cpu_index;
540 env->nb_watchpoints = 0;
541 *penv = env;
542 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
543 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
544 cpu_common_save, cpu_common_load, env);
545 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
546 cpu_save, cpu_load, env);
547 #endif
548 }
549
550 static inline void invalidate_page_bitmap(PageDesc *p)
551 {
552 if (p->code_bitmap) {
553 qemu_free(p->code_bitmap);
554 p->code_bitmap = NULL;
555 }
556 p->code_write_count = 0;
557 }
558
559 /* set to NULL all the 'first_tb' fields in all PageDescs */
560 static void page_flush_tb(void)
561 {
562 int i, j;
563 PageDesc *p;
564
565 for(i = 0; i < L1_SIZE; i++) {
566 p = l1_map[i];
567 if (p) {
568 for(j = 0; j < L2_SIZE; j++) {
569 p->first_tb = NULL;
570 invalidate_page_bitmap(p);
571 p++;
572 }
573 }
574 }
575 }
576
577 /* flush all the translation blocks */
578 /* XXX: tb_flush is currently not thread safe */
579 void tb_flush(CPUState *env1)
580 {
581 CPUState *env;
582 #if defined(DEBUG_FLUSH)
583 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
584 (unsigned long)(code_gen_ptr - code_gen_buffer),
585 nb_tbs, nb_tbs > 0 ?
586 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
587 #endif
588 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
589 cpu_abort(env1, "Internal error: code buffer overflow\n");
590
591 nb_tbs = 0;
592
593 for(env = first_cpu; env != NULL; env = env->next_cpu) {
594 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
595 }
596
597 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
598 page_flush_tb();
599
600 code_gen_ptr = code_gen_buffer;
601 /* XXX: flush processor icache at this point if cache flush is
602 expensive */
603 tb_flush_count++;
604 }
605
606 #ifdef DEBUG_TB_CHECK
607
608 static void tb_invalidate_check(target_ulong address)
609 {
610 TranslationBlock *tb;
611 int i;
612 address &= TARGET_PAGE_MASK;
613 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
614 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
615 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
616 address >= tb->pc + tb->size)) {
617 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
618 address, (long)tb->pc, tb->size);
619 }
620 }
621 }
622 }
623
624 /* verify that all the pages have correct rights for code */
625 static void tb_page_check(void)
626 {
627 TranslationBlock *tb;
628 int i, flags1, flags2;
629
630 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
631 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
632 flags1 = page_get_flags(tb->pc);
633 flags2 = page_get_flags(tb->pc + tb->size - 1);
634 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
635 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
636 (long)tb->pc, tb->size, flags1, flags2);
637 }
638 }
639 }
640 }
641
642 static void tb_jmp_check(TranslationBlock *tb)
643 {
644 TranslationBlock *tb1;
645 unsigned int n1;
646
647 /* suppress any remaining jumps to this TB */
648 tb1 = tb->jmp_first;
649 for(;;) {
650 n1 = (long)tb1 & 3;
651 tb1 = (TranslationBlock *)((long)tb1 & ~3);
652 if (n1 == 2)
653 break;
654 tb1 = tb1->jmp_next[n1];
655 }
656 /* check end of list */
657 if (tb1 != tb) {
658 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
659 }
660 }
661
662 #endif
663
664 /* invalidate one TB */
665 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
666 int next_offset)
667 {
668 TranslationBlock *tb1;
669 for(;;) {
670 tb1 = *ptb;
671 if (tb1 == tb) {
672 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
673 break;
674 }
675 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
676 }
677 }
678
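/* TBs that span two physical pages are linked into two per-page lists;
   the list pointers carry the page slot (0 or 1) in their low two bits,
   and the value 2 marks the end of the circular jump list (see
   tb_link_phys).  Masking with ~3 below recovers the real
   TranslationBlock pointer. */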
679 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
680 {
681 TranslationBlock *tb1;
682 unsigned int n1;
683
684 for(;;) {
685 tb1 = *ptb;
686 n1 = (long)tb1 & 3;
687 tb1 = (TranslationBlock *)((long)tb1 & ~3);
688 if (tb1 == tb) {
689 *ptb = tb1->page_next[n1];
690 break;
691 }
692 ptb = &tb1->page_next[n1];
693 }
694 }
695
696 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
697 {
698 TranslationBlock *tb1, **ptb;
699 unsigned int n1;
700
701 ptb = &tb->jmp_next[n];
702 tb1 = *ptb;
703 if (tb1) {
704 /* find tb(n) in circular list */
705 for(;;) {
706 tb1 = *ptb;
707 n1 = (long)tb1 & 3;
708 tb1 = (TranslationBlock *)((long)tb1 & ~3);
709 if (n1 == n && tb1 == tb)
710 break;
711 if (n1 == 2) {
712 ptb = &tb1->jmp_first;
713 } else {
714 ptb = &tb1->jmp_next[n1];
715 }
716 }
717 /* now we can suppress tb(n) from the list */
718 *ptb = tb->jmp_next[n];
719
720 tb->jmp_next[n] = NULL;
721 }
722 }
723
724 /* reset the jump entry 'n' of a TB so that it is not chained to
725 another TB */
726 static inline void tb_reset_jump(TranslationBlock *tb, int n)
727 {
728 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
729 }
730
731 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
732 {
733 CPUState *env;
734 PageDesc *p;
735 unsigned int h, n1;
736 target_phys_addr_t phys_pc;
737 TranslationBlock *tb1, *tb2;
738
739 /* remove the TB from the hash list */
740 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
741 h = tb_phys_hash_func(phys_pc);
742 tb_remove(&tb_phys_hash[h], tb,
743 offsetof(TranslationBlock, phys_hash_next));
744
745 /* remove the TB from the page list */
746 if (tb->page_addr[0] != page_addr) {
747 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
748 tb_page_remove(&p->first_tb, tb);
749 invalidate_page_bitmap(p);
750 }
751 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
752 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
753 tb_page_remove(&p->first_tb, tb);
754 invalidate_page_bitmap(p);
755 }
756
757 tb_invalidated_flag = 1;
758
759 /* remove the TB from the hash list */
760 h = tb_jmp_cache_hash_func(tb->pc);
761 for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 if (env->tb_jmp_cache[h] == tb)
763 env->tb_jmp_cache[h] = NULL;
764 }
765
766 /* suppress this TB from the two jump lists */
767 tb_jmp_remove(tb, 0);
768 tb_jmp_remove(tb, 1);
769
770 /* suppress any remaining jumps to this TB */
771 tb1 = tb->jmp_first;
772 for(;;) {
773 n1 = (long)tb1 & 3;
774 if (n1 == 2)
775 break;
776 tb1 = (TranslationBlock *)((long)tb1 & ~3);
777 tb2 = tb1->jmp_next[n1];
778 tb_reset_jump(tb1, n1);
779 tb1->jmp_next[n1] = NULL;
780 tb1 = tb2;
781 }
782 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
783
784 tb_phys_invalidate_count++;
785 }
786
787 static inline void set_bits(uint8_t *tab, int start, int len)
788 {
789 int end, mask, end1;
790
791 end = start + len;
792 tab += start >> 3;
793 mask = 0xff << (start & 7);
794 if ((start & ~7) == (end & ~7)) {
795 if (start < end) {
796 mask &= ~(0xff << (end & 7));
797 *tab |= mask;
798 }
799 } else {
800 *tab++ |= mask;
801 start = (start + 8) & ~7;
802 end1 = end & ~7;
803 while (start < end1) {
804 *tab++ = 0xff;
805 start += 8;
806 }
807 if (start < end) {
808 mask = ~(0xff << (end & 7));
809 *tab |= mask;
810 }
811 }
812 }
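/* set_bits() marks bits [start, start + len) in the bitmap; for example,
   set_bits(tab, 3, 6) sets bits 3..8, ORing 0xf8 into tab[0] and 0x01
   into tab[1]. */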
813
814 static void build_page_bitmap(PageDesc *p)
815 {
816 int n, tb_start, tb_end;
817 TranslationBlock *tb;
818
819 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
820 if (!p->code_bitmap)
821 return;
822
823 tb = p->first_tb;
824 while (tb != NULL) {
825 n = (long)tb & 3;
826 tb = (TranslationBlock *)((long)tb & ~3);
827 /* NOTE: this is subtle as a TB may span two physical pages */
828 if (n == 0) {
829 /* NOTE: tb_end may be after the end of the page, but
830 it is not a problem */
831 tb_start = tb->pc & ~TARGET_PAGE_MASK;
832 tb_end = tb_start + tb->size;
833 if (tb_end > TARGET_PAGE_SIZE)
834 tb_end = TARGET_PAGE_SIZE;
835 } else {
836 tb_start = 0;
837 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
838 }
839 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
840 tb = tb->page_next[n];
841 }
842 }
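/* The code bitmap is only built once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults (see
   tb_invalidate_phys_page_range); after that,
   tb_invalidate_phys_page_fast() can test the affected bits and skip the
   expensive TB invalidation when a write does not touch translated
   code. */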
843
844 TranslationBlock *tb_gen_code(CPUState *env,
845 target_ulong pc, target_ulong cs_base,
846 int flags, int cflags)
847 {
848 TranslationBlock *tb;
849 uint8_t *tc_ptr;
850 target_ulong phys_pc, phys_page2, virt_page2;
851 int code_gen_size;
852
853 phys_pc = get_phys_addr_code(env, pc);
854 tb = tb_alloc(pc);
855 if (!tb) {
856 /* flush must be done */
857 tb_flush(env);
858 /* cannot fail at this point */
859 tb = tb_alloc(pc);
860 /* Don't forget to invalidate previous TB info. */
861 tb_invalidated_flag = 1;
862 }
863 tc_ptr = code_gen_ptr;
864 tb->tc_ptr = tc_ptr;
865 tb->cs_base = cs_base;
866 tb->flags = flags;
867 tb->cflags = cflags;
868 cpu_gen_code(env, tb, &code_gen_size);
869 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
870
871 /* check next page if needed */
872 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
873 phys_page2 = -1;
874 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
875 phys_page2 = get_phys_addr_code(env, virt_page2);
876 }
877 tb_link_phys(tb, phys_pc, phys_page2);
878 return tb;
879 }
880
881 /* invalidate all TBs which intersect with the target physical page
882 starting in the range [start, end). NOTE: start and end must refer to
883 the same physical page. 'is_cpu_write_access' should be true if called
884 from a real cpu write access: the virtual CPU will exit the current
885 TB if code is modified inside this TB. */
886 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
887 int is_cpu_write_access)
888 {
889 TranslationBlock *tb, *tb_next, *saved_tb;
890 CPUState *env = cpu_single_env;
891 target_ulong tb_start, tb_end;
892 PageDesc *p;
893 int n;
894 #ifdef TARGET_HAS_PRECISE_SMC
895 int current_tb_not_found = is_cpu_write_access;
896 TranslationBlock *current_tb = NULL;
897 int current_tb_modified = 0;
898 target_ulong current_pc = 0;
899 target_ulong current_cs_base = 0;
900 int current_flags = 0;
901 #endif /* TARGET_HAS_PRECISE_SMC */
902
903 p = page_find(start >> TARGET_PAGE_BITS);
904 if (!p)
905 return;
906 if (!p->code_bitmap &&
907 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
908 is_cpu_write_access) {
909 /* build code bitmap */
910 build_page_bitmap(p);
911 }
912
913 /* we remove all the TBs in the range [start, end) */
914 /* XXX: see if in some cases it could be faster to invalidate all the code */
915 tb = p->first_tb;
916 while (tb != NULL) {
917 n = (long)tb & 3;
918 tb = (TranslationBlock *)((long)tb & ~3);
919 tb_next = tb->page_next[n];
920 /* NOTE: this is subtle as a TB may span two physical pages */
921 if (n == 0) {
922 /* NOTE: tb_end may be after the end of the page, but
923 it is not a problem */
924 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
925 tb_end = tb_start + tb->size;
926 } else {
927 tb_start = tb->page_addr[1];
928 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
929 }
930 if (!(tb_end <= start || tb_start >= end)) {
931 #ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_not_found) {
933 current_tb_not_found = 0;
934 current_tb = NULL;
935 if (env->mem_io_pc) {
936 /* now we have a real cpu fault */
937 current_tb = tb_find_pc(env->mem_io_pc);
938 }
939 }
940 if (current_tb == tb &&
941 (current_tb->cflags & CF_COUNT_MASK) != 1) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
947
948 current_tb_modified = 1;
949 cpu_restore_state(current_tb, env,
950 env->mem_io_pc, NULL);
951 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
952 &current_flags);
953 }
954 #endif /* TARGET_HAS_PRECISE_SMC */
955 /* we need to do that to handle the case where a signal
956 occurs while doing tb_phys_invalidate() */
957 saved_tb = NULL;
958 if (env) {
959 saved_tb = env->current_tb;
960 env->current_tb = NULL;
961 }
962 tb_phys_invalidate(tb, -1);
963 if (env) {
964 env->current_tb = saved_tb;
965 if (env->interrupt_request && env->current_tb)
966 cpu_interrupt(env, env->interrupt_request);
967 }
968 }
969 tb = tb_next;
970 }
971 #if !defined(CONFIG_USER_ONLY)
972 /* if no code remaining, no need to continue to use slow writes */
973 if (!p->first_tb) {
974 invalidate_page_bitmap(p);
975 if (is_cpu_write_access) {
976 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
977 }
978 }
979 #endif
980 #ifdef TARGET_HAS_PRECISE_SMC
981 if (current_tb_modified) {
982 /* we generate a block containing just the instruction
983 modifying the memory. It will ensure that it cannot modify
984 itself */
985 env->current_tb = NULL;
986 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
987 cpu_resume_from_signal(env, NULL);
988 }
989 #endif
990 }
991
992 /* len must be <= 8 and start must be a multiple of len */
993 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
994 {
995 PageDesc *p;
996 int offset, b;
997 #if 0
998 if (1) {
999 if (loglevel) {
1000 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1001 cpu_single_env->mem_io_vaddr, len,
1002 cpu_single_env->eip,
1003 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1004 }
1005 }
1006 #endif
1007 p = page_find(start >> TARGET_PAGE_BITS);
1008 if (!p)
1009 return;
1010 if (p->code_bitmap) {
1011 offset = start & ~TARGET_PAGE_MASK;
1012 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1013 if (b & ((1 << len) - 1))
1014 goto do_invalidate;
1015 } else {
1016 do_invalidate:
1017 tb_invalidate_phys_page_range(start, start + len, 1);
1018 }
1019 }
1020
1021 #if !defined(CONFIG_SOFTMMU)
1022 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1023 unsigned long pc, void *puc)
1024 {
1025 TranslationBlock *tb;
1026 PageDesc *p;
1027 int n;
1028 #ifdef TARGET_HAS_PRECISE_SMC
1029 TranslationBlock *current_tb = NULL;
1030 CPUState *env = cpu_single_env;
1031 int current_tb_modified = 0;
1032 target_ulong current_pc = 0;
1033 target_ulong current_cs_base = 0;
1034 int current_flags = 0;
1035 #endif
1036
1037 addr &= TARGET_PAGE_MASK;
1038 p = page_find(addr >> TARGET_PAGE_BITS);
1039 if (!p)
1040 return;
1041 tb = p->first_tb;
1042 #ifdef TARGET_HAS_PRECISE_SMC
1043 if (tb && pc != 0) {
1044 current_tb = tb_find_pc(pc);
1045 }
1046 #endif
1047 while (tb != NULL) {
1048 n = (long)tb & 3;
1049 tb = (TranslationBlock *)((long)tb & ~3);
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (current_tb == tb &&
1052 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1053 /* If we are modifying the current TB, we must stop
1054 its execution. We could be more precise by checking
1055 that the modification is after the current PC, but it
1056 would require a specialized function to partially
1057 restore the CPU state */
1058
1059 current_tb_modified = 1;
1060 cpu_restore_state(current_tb, env, pc, puc);
1061 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1062 &current_flags);
1063 }
1064 #endif /* TARGET_HAS_PRECISE_SMC */
1065 tb_phys_invalidate(tb, addr);
1066 tb = tb->page_next[n];
1067 }
1068 p->first_tb = NULL;
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_modified) {
1071 /* we generate a block containing just the instruction
1072 modifying the memory. It will ensure that it cannot modify
1073 itself */
1074 env->current_tb = NULL;
1075 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1076 cpu_resume_from_signal(env, puc);
1077 }
1078 #endif
1079 }
1080 #endif
1081
1082 /* add the tb in the target page and protect it if necessary */
1083 static inline void tb_alloc_page(TranslationBlock *tb,
1084 unsigned int n, target_ulong page_addr)
1085 {
1086 PageDesc *p;
1087 TranslationBlock *last_first_tb;
1088
1089 tb->page_addr[n] = page_addr;
1090 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1091 tb->page_next[n] = p->first_tb;
1092 last_first_tb = p->first_tb;
1093 p->first_tb = (TranslationBlock *)((long)tb | n);
1094 invalidate_page_bitmap(p);
1095
1096 #if defined(TARGET_HAS_SMC) || 1
1097
1098 #if defined(CONFIG_USER_ONLY)
1099 if (p->flags & PAGE_WRITE) {
1100 target_ulong addr;
1101 PageDesc *p2;
1102 int prot;
1103
1104 /* force the host page to be non-writable (writes will incur a
1105 page fault + mprotect overhead) */
1106 page_addr &= qemu_host_page_mask;
1107 prot = 0;
1108 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1109 addr += TARGET_PAGE_SIZE) {
1110
1111 p2 = page_find (addr >> TARGET_PAGE_BITS);
1112 if (!p2)
1113 continue;
1114 prot |= p2->flags;
1115 p2->flags &= ~PAGE_WRITE;
1116 page_get_flags(addr);
1117 }
1118 mprotect(g2h(page_addr), qemu_host_page_size,
1119 (prot & PAGE_BITS) & ~PAGE_WRITE);
1120 #ifdef DEBUG_TB_INVALIDATE
1121 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1122 page_addr);
1123 #endif
1124 }
1125 #else
1126 /* if some code is already present, then the pages are already
1127 protected. So we handle the case where only the first TB is
1128 allocated in a physical page */
1129 if (!last_first_tb) {
1130 tlb_protect_code(page_addr);
1131 }
1132 #endif
1133
1134 #endif /* TARGET_HAS_SMC */
1135 }
1136
1137 /* Allocate a new translation block. Flush the translation buffer if
1138 there are too many translation blocks or too much generated code. */
1139 TranslationBlock *tb_alloc(target_ulong pc)
1140 {
1141 TranslationBlock *tb;
1142
1143 if (nb_tbs >= code_gen_max_blocks ||
1144 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1145 return NULL;
1146 tb = &tbs[nb_tbs++];
1147 tb->pc = pc;
1148 tb->cflags = 0;
1149 return tb;
1150 }
1151
1152 void tb_free(TranslationBlock *tb)
1153 {
1154 /* In practice this is mostly used for single-use temporary TBs.
1155 Ignore the hard cases and just back up if this TB happens to
1156 be the last one generated. */
1157 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1158 code_gen_ptr = tb->tc_ptr;
1159 nb_tbs--;
1160 }
1161 }
1162
1163 /* add a new TB and link it to the physical page tables. phys_page2 is
1164 (-1) to indicate that only one page contains the TB. */
1165 void tb_link_phys(TranslationBlock *tb,
1166 target_ulong phys_pc, target_ulong phys_page2)
1167 {
1168 unsigned int h;
1169 TranslationBlock **ptb;
1170
1171 /* Grab the mmap lock to stop another thread invalidating this TB
1172 before we are done. */
1173 mmap_lock();
1174 /* add in the physical hash table */
1175 h = tb_phys_hash_func(phys_pc);
1176 ptb = &tb_phys_hash[h];
1177 tb->phys_hash_next = *ptb;
1178 *ptb = tb;
1179
1180 /* add in the page list */
1181 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1182 if (phys_page2 != -1)
1183 tb_alloc_page(tb, 1, phys_page2);
1184 else
1185 tb->page_addr[1] = -1;
1186
1187 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1188 tb->jmp_next[0] = NULL;
1189 tb->jmp_next[1] = NULL;
1190
1191 /* init original jump addresses */
1192 if (tb->tb_next_offset[0] != 0xffff)
1193 tb_reset_jump(tb, 0);
1194 if (tb->tb_next_offset[1] != 0xffff)
1195 tb_reset_jump(tb, 1);
1196
1197 #ifdef DEBUG_TB_CHECK
1198 tb_page_check();
1199 #endif
1200 mmap_unlock();
1201 }
1202
1203 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204 tb[1].tc_ptr. Return NULL if not found */
1205 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1206 {
1207 int m_min, m_max, m;
1208 unsigned long v;
1209 TranslationBlock *tb;
1210
1211 if (nb_tbs <= 0)
1212 return NULL;
1213 if (tc_ptr < (unsigned long)code_gen_buffer ||
1214 tc_ptr >= (unsigned long)code_gen_ptr)
1215 return NULL;
1216 /* binary search (cf Knuth) */
1217 m_min = 0;
1218 m_max = nb_tbs - 1;
1219 while (m_min <= m_max) {
1220 m = (m_min + m_max) >> 1;
1221 tb = &tbs[m];
1222 v = (unsigned long)tb->tc_ptr;
1223 if (v == tc_ptr)
1224 return tb;
1225 else if (tc_ptr < v) {
1226 m_max = m - 1;
1227 } else {
1228 m_min = m + 1;
1229 }
1230 }
1231 return &tbs[m_max];
1232 }
1233
1234 static void tb_reset_jump_recursive(TranslationBlock *tb);
1235
1236 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1237 {
1238 TranslationBlock *tb1, *tb_next, **ptb;
1239 unsigned int n1;
1240
1241 tb1 = tb->jmp_next[n];
1242 if (tb1 != NULL) {
1243 /* find head of list */
1244 for(;;) {
1245 n1 = (long)tb1 & 3;
1246 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1247 if (n1 == 2)
1248 break;
1249 tb1 = tb1->jmp_next[n1];
1250 }
1251 /* we are now sure that tb jumps to tb1 */
1252 tb_next = tb1;
1253
1254 /* remove tb from the jmp_first list */
1255 ptb = &tb_next->jmp_first;
1256 for(;;) {
1257 tb1 = *ptb;
1258 n1 = (long)tb1 & 3;
1259 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1260 if (n1 == n && tb1 == tb)
1261 break;
1262 ptb = &tb1->jmp_next[n1];
1263 }
1264 *ptb = tb->jmp_next[n];
1265 tb->jmp_next[n] = NULL;
1266
1267 /* suppress the jump to next tb in generated code */
1268 tb_reset_jump(tb, n);
1269
1270 /* suppress jumps in the tb on which we could have jumped */
1271 tb_reset_jump_recursive(tb_next);
1272 }
1273 }
1274
1275 static void tb_reset_jump_recursive(TranslationBlock *tb)
1276 {
1277 tb_reset_jump_recursive2(tb, 0);
1278 tb_reset_jump_recursive2(tb, 1);
1279 }
1280
1281 #if defined(TARGET_HAS_ICE)
1282 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1283 {
1284 target_phys_addr_t addr;
1285 target_ulong pd;
1286 ram_addr_t ram_addr;
1287 PhysPageDesc *p;
1288
1289 addr = cpu_get_phys_page_debug(env, pc);
1290 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1291 if (!p) {
1292 pd = IO_MEM_UNASSIGNED;
1293 } else {
1294 pd = p->phys_offset;
1295 }
1296 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1297 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1298 }
1299 #endif
1300
1301 /* Add a watchpoint. */
1302 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1303 {
1304 int i;
1305
1306 for (i = 0; i < env->nb_watchpoints; i++) {
1307 if (addr == env->watchpoint[i].vaddr)
1308 return 0;
1309 }
1310 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1311 return -1;
1312
1313 i = env->nb_watchpoints++;
1314 env->watchpoint[i].vaddr = addr;
1315 env->watchpoint[i].type = type;
1316 tlb_flush_page(env, addr);
1317 /* FIXME: This flush is needed because of the hack to make memory ops
1318 terminate the TB. It can be removed once the proper IO trap and
1319 re-execute bits are in. */
1320 tb_flush(env);
1321 return i;
1322 }
1323
1324 /* Remove a watchpoint. */
1325 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1326 {
1327 int i;
1328
1329 for (i = 0; i < env->nb_watchpoints; i++) {
1330 if (addr == env->watchpoint[i].vaddr) {
1331 env->nb_watchpoints--;
1332 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1333 tlb_flush_page(env, addr);
1334 return 0;
1335 }
1336 }
1337 return -1;
1338 }
1339
1340 /* Remove all watchpoints. */
1341 void cpu_watchpoint_remove_all(CPUState *env) {
1342 int i;
1343
1344 for (i = 0; i < env->nb_watchpoints; i++) {
1345 tlb_flush_page(env, env->watchpoint[i].vaddr);
1346 }
1347 env->nb_watchpoints = 0;
1348 }
1349
1350 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1351 breakpoint is reached */
1352 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1353 {
1354 #if defined(TARGET_HAS_ICE)
1355 int i;
1356
1357 for(i = 0; i < env->nb_breakpoints; i++) {
1358 if (env->breakpoints[i] == pc)
1359 return 0;
1360 }
1361
1362 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1363 return -1;
1364 env->breakpoints[env->nb_breakpoints++] = pc;
1365
1366 breakpoint_invalidate(env, pc);
1367 return 0;
1368 #else
1369 return -1;
1370 #endif
1371 }
1372
1373 /* remove all breakpoints */
1374 void cpu_breakpoint_remove_all(CPUState *env) {
1375 #if defined(TARGET_HAS_ICE)
1376 int i;
1377 for(i = 0; i < env->nb_breakpoints; i++) {
1378 breakpoint_invalidate(env, env->breakpoints[i]);
1379 }
1380 env->nb_breakpoints = 0;
1381 #endif
1382 }
1383
1384 /* remove a breakpoint */
1385 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1386 {
1387 #if defined(TARGET_HAS_ICE)
1388 int i;
1389 for(i = 0; i < env->nb_breakpoints; i++) {
1390 if (env->breakpoints[i] == pc)
1391 goto found;
1392 }
1393 return -1;
1394 found:
1395 env->nb_breakpoints--;
1396 if (i < env->nb_breakpoints)
1397 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1398
1399 breakpoint_invalidate(env, pc);
1400 return 0;
1401 #else
1402 return -1;
1403 #endif
1404 }
1405
1406 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1407 CPU loop after each instruction */
1408 void cpu_single_step(CPUState *env, int enabled)
1409 {
1410 #if defined(TARGET_HAS_ICE)
1411 if (env->singlestep_enabled != enabled) {
1412 env->singlestep_enabled = enabled;
1413 /* must flush all the translated code to avoid inconsistencies */
1414 /* XXX: only flush what is necessary */
1415 tb_flush(env);
1416 }
1417 #endif
1418 }
1419
1420 /* enable or disable low-level logging */
1421 void cpu_set_log(int log_flags)
1422 {
1423 loglevel = log_flags;
1424 if (loglevel && !logfile) {
1425 logfile = fopen(logfilename, log_append ? "a" : "w");
1426 if (!logfile) {
1427 perror(logfilename);
1428 _exit(1);
1429 }
1430 #if !defined(CONFIG_SOFTMMU)
1431 /* must avoid glibc's internal use of mmap() by setting a buffer "by hand" */
1432 {
1433 static char logfile_buf[4096];
1434 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1435 }
1436 #else
1437 setvbuf(logfile, NULL, _IOLBF, 0);
1438 #endif
1439 log_append = 1;
1440 }
1441 if (!loglevel && logfile) {
1442 fclose(logfile);
1443 logfile = NULL;
1444 }
1445 }
1446
1447 void cpu_set_log_filename(const char *filename)
1448 {
1449 logfilename = strdup(filename);
1450 if (logfile) {
1451 fclose(logfile);
1452 logfile = NULL;
1453 }
1454 cpu_set_log(loglevel);
1455 }
1456
1457 /* mask must never be zero, except for A20 change call */
1458 void cpu_interrupt(CPUState *env, int mask)
1459 {
1460 #if !defined(USE_NPTL)
1461 TranslationBlock *tb;
1462 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1463 #endif
1464 int old_mask;
1465
1466 old_mask = env->interrupt_request;
1467 /* FIXME: This is probably not threadsafe. A different thread could
1468 be in the middle of a read-modify-write operation. */
1469 env->interrupt_request |= mask;
1470 #if defined(USE_NPTL)
1471 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1472 problem and hope the cpu will stop of its own accord. For userspace
1473 emulation this often isn't actually as bad as it sounds. Often
1474 signals are used primarily to interrupt blocking syscalls. */
1475 #else
1476 if (use_icount) {
1477 env->icount_decr.u16.high = 0xffff;
1478 #ifndef CONFIG_USER_ONLY
1479 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1480 an async event happened and we need to process it. */
1481 if (!can_do_io(env)
1482 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1483 cpu_abort(env, "Raised interrupt while not in I/O function");
1484 }
1485 #endif
1486 } else {
1487 tb = env->current_tb;
1488 /* if the cpu is currently executing code, we must unlink it and
1489 all the potentially executing TB */
1490 if (tb && !testandset(&interrupt_lock)) {
1491 env->current_tb = NULL;
1492 tb_reset_jump_recursive(tb);
1493 resetlock(&interrupt_lock);
1494 }
1495 }
1496 #endif
1497 }
1498
1499 void cpu_reset_interrupt(CPUState *env, int mask)
1500 {
1501 env->interrupt_request &= ~mask;
1502 }
1503
1504 const CPULogItem cpu_log_items[] = {
1505 { CPU_LOG_TB_OUT_ASM, "out_asm",
1506 "show generated host assembly code for each compiled TB" },
1507 { CPU_LOG_TB_IN_ASM, "in_asm",
1508 "show target assembly code for each compiled TB" },
1509 { CPU_LOG_TB_OP, "op",
1510 "show micro ops for each compiled TB" },
1511 { CPU_LOG_TB_OP_OPT, "op_opt",
1512 "show micro ops "
1513 #ifdef TARGET_I386
1514 "before eflags optimization and "
1515 #endif
1516 "after liveness analysis" },
1517 { CPU_LOG_INT, "int",
1518 "show interrupts/exceptions in short format" },
1519 { CPU_LOG_EXEC, "exec",
1520 "show trace before each executed TB (lots of logs)" },
1521 { CPU_LOG_TB_CPU, "cpu",
1522 "show CPU state before block translation" },
1523 #ifdef TARGET_I386
1524 { CPU_LOG_PCALL, "pcall",
1525 "show protected mode far calls/returns/exceptions" },
1526 #endif
1527 #ifdef DEBUG_IOPORT
1528 { CPU_LOG_IOPORT, "ioport",
1529 "show all i/o ports accesses" },
1530 #endif
1531 { 0, NULL, NULL },
1532 };
1533
1534 static int cmp1(const char *s1, int n, const char *s2)
1535 {
1536 if (strlen(s2) != n)
1537 return 0;
1538 return memcmp(s1, s2, n) == 0;
1539 }
1540
1541 /* takes a comma-separated list of log masks. Returns 0 on error. */
1542 int cpu_str_to_log_mask(const char *str)
1543 {
1544 const CPULogItem *item;
1545 int mask;
1546 const char *p, *p1;
1547
1548 p = str;
1549 mask = 0;
1550 for(;;) {
1551 p1 = strchr(p, ',');
1552 if (!p1)
1553 p1 = p + strlen(p);
1554 if(cmp1(p,p1-p,"all")) {
1555 for(item = cpu_log_items; item->mask != 0; item++) {
1556 mask |= item->mask;
1557 }
1558 } else {
1559 for(item = cpu_log_items; item->mask != 0; item++) {
1560 if (cmp1(p, p1 - p, item->name))
1561 goto found;
1562 }
1563 return 0;
1564 }
1565 found:
1566 mask |= item->mask;
1567 if (*p1 != ',')
1568 break;
1569 p = p1 + 1;
1570 }
1571 return mask;
1572 }
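/* Typical usage: cpu_str_to_log_mask("in_asm,op,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP | CPU_LOG_INT, while "all" selects
   the union of every entry in cpu_log_items. */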
1573
1574 void cpu_abort(CPUState *env, const char *fmt, ...)
1575 {
1576 va_list ap;
1577 va_list ap2;
1578
1579 va_start(ap, fmt);
1580 va_copy(ap2, ap);
1581 fprintf(stderr, "qemu: fatal: ");
1582 vfprintf(stderr, fmt, ap);
1583 fprintf(stderr, "\n");
1584 #ifdef TARGET_I386
1585 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1586 #else
1587 cpu_dump_state(env, stderr, fprintf, 0);
1588 #endif
1589 if (logfile) {
1590 fprintf(logfile, "qemu: fatal: ");
1591 vfprintf(logfile, fmt, ap2);
1592 fprintf(logfile, "\n");
1593 #ifdef TARGET_I386
1594 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1595 #else
1596 cpu_dump_state(env, logfile, fprintf, 0);
1597 #endif
1598 fflush(logfile);
1599 fclose(logfile);
1600 }
1601 va_end(ap2);
1602 va_end(ap);
1603 abort();
1604 }
1605
1606 CPUState *cpu_copy(CPUState *env)
1607 {
1608 CPUState *new_env = cpu_init(env->cpu_model_str);
1609 /* preserve chaining and index */
1610 CPUState *next_cpu = new_env->next_cpu;
1611 int cpu_index = new_env->cpu_index;
1612 memcpy(new_env, env, sizeof(CPUState));
1613 new_env->next_cpu = next_cpu;
1614 new_env->cpu_index = cpu_index;
1615 return new_env;
1616 }
1617
1618 #if !defined(CONFIG_USER_ONLY)
1619
1620 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1621 {
1622 unsigned int i;
1623
1624 /* Discard jump cache entries for any tb which might potentially
1625 overlap the flushed page. */
1626 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1627 memset (&env->tb_jmp_cache[i], 0,
1628 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1629
1630 i = tb_jmp_cache_hash_page(addr);
1631 memset (&env->tb_jmp_cache[i], 0,
1632 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1633 }
1634
1635 /* NOTE: if flush_global is true, also flush global entries (not
1636 implemented yet) */
1637 void tlb_flush(CPUState *env, int flush_global)
1638 {
1639 int i;
1640
1641 #if defined(DEBUG_TLB)
1642 printf("tlb_flush:\n");
1643 #endif
1644 /* must reset current TB so that interrupts cannot modify the
1645 links while we are modifying them */
1646 env->current_tb = NULL;
1647
1648 for(i = 0; i < CPU_TLB_SIZE; i++) {
1649 env->tlb_table[0][i].addr_read = -1;
1650 env->tlb_table[0][i].addr_write = -1;
1651 env->tlb_table[0][i].addr_code = -1;
1652 env->tlb_table[1][i].addr_read = -1;
1653 env->tlb_table[1][i].addr_write = -1;
1654 env->tlb_table[1][i].addr_code = -1;
1655 #if (NB_MMU_MODES >= 3)
1656 env->tlb_table[2][i].addr_read = -1;
1657 env->tlb_table[2][i].addr_write = -1;
1658 env->tlb_table[2][i].addr_code = -1;
1659 #if (NB_MMU_MODES == 4)
1660 env->tlb_table[3][i].addr_read = -1;
1661 env->tlb_table[3][i].addr_write = -1;
1662 env->tlb_table[3][i].addr_code = -1;
1663 #endif
1664 #endif
1665 }
1666
1667 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1668
1669 #ifdef USE_KQEMU
1670 if (env->kqemu_enabled) {
1671 kqemu_flush(env, flush_global);
1672 }
1673 #endif
1674 tlb_flush_count++;
1675 }
1676
1677 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1678 {
1679 if (addr == (tlb_entry->addr_read &
1680 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1681 addr == (tlb_entry->addr_write &
1682 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1683 addr == (tlb_entry->addr_code &
1684 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1685 tlb_entry->addr_read = -1;
1686 tlb_entry->addr_write = -1;
1687 tlb_entry->addr_code = -1;
1688 }
1689 }
1690
1691 void tlb_flush_page(CPUState *env, target_ulong addr)
1692 {
1693 int i;
1694
1695 #if defined(DEBUG_TLB)
1696 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1697 #endif
1698 /* must reset current TB so that interrupts cannot modify the
1699 links while we are modifying them */
1700 env->current_tb = NULL;
1701
1702 addr &= TARGET_PAGE_MASK;
1703 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1704 tlb_flush_entry(&env->tlb_table[0][i], addr);
1705 tlb_flush_entry(&env->tlb_table[1][i], addr);
1706 #if (NB_MMU_MODES >= 3)
1707 tlb_flush_entry(&env->tlb_table[2][i], addr);
1708 #if (NB_MMU_MODES == 4)
1709 tlb_flush_entry(&env->tlb_table[3][i], addr);
1710 #endif
1711 #endif
1712
1713 tlb_flush_jmp_cache(env, addr);
1714
1715 #ifdef USE_KQEMU
1716 if (env->kqemu_enabled) {
1717 kqemu_flush_page(env, addr);
1718 }
1719 #endif
1720 }
1721
1722 /* update the TLBs so that writes to code in the virtual page 'addr'
1723 can be detected */
1724 static void tlb_protect_code(ram_addr_t ram_addr)
1725 {
1726 cpu_physical_memory_reset_dirty(ram_addr,
1727 ram_addr + TARGET_PAGE_SIZE,
1728 CODE_DIRTY_FLAG);
1729 }
1730
1731 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1732 tested for self-modifying code */
1733 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1734 target_ulong vaddr)
1735 {
1736 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1737 }
1738
1739 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1740 unsigned long start, unsigned long length)
1741 {
1742 unsigned long addr;
1743 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1744 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1745 if ((addr - start) < length) {
1746 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1747 }
1748 }
1749 }
1750
1751 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1752 int dirty_flags)
1753 {
1754 CPUState *env;
1755 unsigned long length, start1;
1756 int i, mask, len;
1757 uint8_t *p;
1758
1759 start &= TARGET_PAGE_MASK;
1760 end = TARGET_PAGE_ALIGN(end);
1761
1762 length = end - start;
1763 if (length == 0)
1764 return;
1765 len = length >> TARGET_PAGE_BITS;
1766 #ifdef USE_KQEMU
1767 /* XXX: should not depend on cpu context */
1768 env = first_cpu;
1769 if (env->kqemu_enabled) {
1770 ram_addr_t addr;
1771 addr = start;
1772 for(i = 0; i < len; i++) {
1773 kqemu_set_notdirty(env, addr);
1774 addr += TARGET_PAGE_SIZE;
1775 }
1776 }
1777 #endif
1778 mask = ~dirty_flags;
1779 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1780 for(i = 0; i < len; i++)
1781 p[i] &= mask;
1782
1783 /* we modify the TLB cache so that the dirty bit will be set again
1784 when accessing the range */
1785 start1 = start + (unsigned long)phys_ram_base;
1786 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1787 for(i = 0; i < CPU_TLB_SIZE; i++)
1788 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1789 for(i = 0; i < CPU_TLB_SIZE; i++)
1790 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1791 #if (NB_MMU_MODES >= 3)
1792 for(i = 0; i < CPU_TLB_SIZE; i++)
1793 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1794 #if (NB_MMU_MODES == 4)
1795 for(i = 0; i < CPU_TLB_SIZE; i++)
1796 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1797 #endif
1798 #endif
1799 }
1800 }
1801
1802 int cpu_physical_memory_set_dirty_tracking(int enable)
1803 {
1804 in_migration = enable;
1805 return 0;
1806 }
1807
1808 int cpu_physical_memory_get_dirty_tracking(void)
1809 {
1810 return in_migration;
1811 }
1812
1813 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1814 {
1815 ram_addr_t ram_addr;
1816
1817 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1818 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1819 tlb_entry->addend - (unsigned long)phys_ram_base;
1820 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1821 tlb_entry->addr_write |= TLB_NOTDIRTY;
1822 }
1823 }
1824 }
1825
1826 /* update the TLB according to the current state of the dirty bits */
1827 void cpu_tlb_update_dirty(CPUState *env)
1828 {
1829 int i;
1830 for(i = 0; i < CPU_TLB_SIZE; i++)
1831 tlb_update_dirty(&env->tlb_table[0][i]);
1832 for(i = 0; i < CPU_TLB_SIZE; i++)
1833 tlb_update_dirty(&env->tlb_table[1][i]);
1834 #if (NB_MMU_MODES >= 3)
1835 for(i = 0; i < CPU_TLB_SIZE; i++)
1836 tlb_update_dirty(&env->tlb_table[2][i]);
1837 #if (NB_MMU_MODES == 4)
1838 for(i = 0; i < CPU_TLB_SIZE; i++)
1839 tlb_update_dirty(&env->tlb_table[3][i]);
1840 #endif
1841 #endif
1842 }
1843
1844 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1845 {
1846 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1847 tlb_entry->addr_write = vaddr;
1848 }
1849
1850 /* update the TLB corresponding to virtual page vaddr
1851 so that it is no longer dirty */
1852 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1853 {
1854 int i;
1855
1856 vaddr &= TARGET_PAGE_MASK;
1857 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1858 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1859 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1860 #if (NB_MMU_MODES >= 3)
1861 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1862 #if (NB_MMU_MODES == 4)
1863 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1864 #endif
1865 #endif
1866 }
1867
1868 /* add a new TLB entry. At most one entry for a given virtual address
1869 is permitted. Return 0 if OK or 2 if the page could not be mapped
1870 (can only happen in non SOFTMMU mode for I/O pages or pages
1871 conflicting with the host address space). */
1872 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1873 target_phys_addr_t paddr, int prot,
1874 int mmu_idx, int is_softmmu)
1875 {
1876 PhysPageDesc *p;
1877 unsigned long pd;
1878 unsigned int index;
1879 target_ulong address;
1880 target_ulong code_address;
1881 target_phys_addr_t addend;
1882 int ret;
1883 CPUTLBEntry *te;
1884 int i;
1885 target_phys_addr_t iotlb;
1886
1887 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1888 if (!p) {
1889 pd = IO_MEM_UNASSIGNED;
1890 } else {
1891 pd = p->phys_offset;
1892 }
1893 #if defined(DEBUG_TLB)
1894 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1895 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1896 #endif
1897
1898 ret = 0;
1899 address = vaddr;
1900 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1901 /* IO memory case (romd handled later) */
1902 address |= TLB_MMIO;
1903 }
1904 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1905 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1906 /* Normal RAM. */
1907 iotlb = pd & TARGET_PAGE_MASK;
1908 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1909 iotlb |= IO_MEM_NOTDIRTY;
1910 else
1911 iotlb |= IO_MEM_ROM;
1912 } else {
1913 /* IO handlers are currently passed a physical address.
1914 It would be nice to pass an offset from the base address
1915 of that region. This would avoid having to special case RAM,
1916 and avoid full address decoding in every device.
1917 We can't use the high bits of pd for this because
1918 IO_MEM_ROMD uses these as a ram address. */
1919 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1920 }
1921
1922 code_address = address;
1923 /* Make accesses to pages with watchpoints go via the
1924 watchpoint trap routines. */
1925 for (i = 0; i < env->nb_watchpoints; i++) {
1926 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1927 iotlb = io_mem_watch + paddr;
1928 /* TODO: The memory case can be optimized by not trapping
1929 reads of pages with a write breakpoint. */
1930 address |= TLB_MMIO;
1931 }
1932 }
1933
1934 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1936 te = &env->tlb_table[mmu_idx][index];
1937 te->addend = addend - vaddr;
1938 if (prot & PAGE_READ) {
1939 te->addr_read = address;
1940 } else {
1941 te->addr_read = -1;
1942 }
1943
1944 if (prot & PAGE_EXEC) {
1945 te->addr_code = code_address;
1946 } else {
1947 te->addr_code = -1;
1948 }
1949 if (prot & PAGE_WRITE) {
1950 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1951 (pd & IO_MEM_ROMD)) {
1952 /* Write access calls the I/O callback. */
1953 te->addr_write = address | TLB_MMIO;
1954 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1955 !cpu_physical_memory_is_dirty(pd)) {
1956 te->addr_write = address | TLB_NOTDIRTY;
1957 } else {
1958 te->addr_write = address;
1959 }
1960 } else {
1961 te->addr_write = -1;
1962 }
1963 return ret;
1964 }
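/* Usage sketch (illustrative only, not part of this file): a target's MMU
   fault handler typically resolves the virtual address with its own
   page-table walker and then installs the mapping here.  The helper
   my_mmu_translate() and its signature are assumptions for this example.

     target_phys_addr_t paddr;
     int prot;

     if (my_mmu_translate(env, vaddr, rw, mmu_idx, &paddr, &prot) == 0)
         return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                                  paddr & TARGET_PAGE_MASK,
                                  prot, mmu_idx, is_softmmu);
     return 1;            (the caller then raises the target page fault)
*/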
1965
1966 #else
1967
1968 void tlb_flush(CPUState *env, int flush_global)
1969 {
1970 }
1971
1972 void tlb_flush_page(CPUState *env, target_ulong addr)
1973 {
1974 }
1975
1976 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977 target_phys_addr_t paddr, int prot,
1978 int mmu_idx, int is_softmmu)
1979 {
1980 return 0;
1981 }
1982
1983 /* dump memory mappings */
1984 void page_dump(FILE *f)
1985 {
1986 unsigned long start, end;
1987 int i, j, prot, prot1;
1988 PageDesc *p;
1989
1990 fprintf(f, "%-8s %-8s %-8s %s\n",
1991 "start", "end", "size", "prot");
1992 start = -1;
1993 end = -1;
1994 prot = 0;
1995 for(i = 0; i <= L1_SIZE; i++) {
1996 if (i < L1_SIZE)
1997 p = l1_map[i];
1998 else
1999 p = NULL;
2000 for(j = 0;j < L2_SIZE; j++) {
2001 if (!p)
2002 prot1 = 0;
2003 else
2004 prot1 = p[j].flags;
2005 if (prot1 != prot) {
2006 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2007 if (start != -1) {
2008 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2009 start, end, end - start,
2010 prot & PAGE_READ ? 'r' : '-',
2011 prot & PAGE_WRITE ? 'w' : '-',
2012 prot & PAGE_EXEC ? 'x' : '-');
2013 }
2014 if (prot1 != 0)
2015 start = end;
2016 else
2017 start = -1;
2018 prot = prot1;
2019 }
2020 if (!p)
2021 break;
2022 }
2023 }
2024 }
2025
2026 int page_get_flags(target_ulong address)
2027 {
2028 PageDesc *p;
2029
2030 p = page_find(address >> TARGET_PAGE_BITS);
2031 if (!p)
2032 return 0;
2033 return p->flags;
2034 }
2035
2036 /* modify the flags of a page and invalidate the code if
2037 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2038 depending on PAGE_WRITE */
2039 void page_set_flags(target_ulong start, target_ulong end, int flags)
2040 {
2041 PageDesc *p;
2042 target_ulong addr;
2043
2044 /* mmap_lock should already be held. */
2045 start = start & TARGET_PAGE_MASK;
2046 end = TARGET_PAGE_ALIGN(end);
2047 if (flags & PAGE_WRITE)
2048 flags |= PAGE_WRITE_ORG;
2049 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2050 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2051 /* We may be called for host regions that are outside guest
2052 address space. */
2053 if (!p)
2054 return;
2055 /* if the page is becoming writable and still contains translated
2056 code, invalidate that code */
2057 if (!(p->flags & PAGE_WRITE) &&
2058 (flags & PAGE_WRITE) &&
2059 p->first_tb) {
2060 tb_invalidate_phys_page(addr, 0, NULL);
2061 }
2062 p->flags = flags;
2063 }
2064 }
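/* Usage sketch (illustrative only): user-mode mmap emulation marks a newly
   mapped guest range valid with the requested protection, roughly:

     page_set_flags(start, start + len, prot | PAGE_VALID);

   where start and len are page-aligned and prot combines
   PAGE_READ/PAGE_WRITE/PAGE_EXEC. */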
2065
2066 int page_check_range(target_ulong start, target_ulong len, int flags)
2067 {
2068 PageDesc *p;
2069 target_ulong end;
2070 target_ulong addr;
2071
2072 if (start + len < start)
2073 /* we've wrapped around */
2074 return -1;
2075
2076 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2077 start = start & TARGET_PAGE_MASK;
2078
2079 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2080 p = page_find(addr >> TARGET_PAGE_BITS);
2081 if( !p )
2082 return -1;
2083 if( !(p->flags & PAGE_VALID) )
2084 return -1;
2085
2086 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2087 return -1;
2088 if (flags & PAGE_WRITE) {
2089 if (!(p->flags & PAGE_WRITE_ORG))
2090 return -1;
2091 /* unprotect the page if it was put read-only because it
2092 contains translated code */
2093 if (!(p->flags & PAGE_WRITE)) {
2094 if (!page_unprotect(addr, 0, NULL))
2095 return -1;
2096 }
2097 return 0;
2098 }
2099 }
2100 return 0;
2101 }
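/* Usage sketch (illustrative only): syscall emulation can validate a guest
   buffer before dereferencing it; guest_addr and len are placeholders and
   TARGET_EFAULT stands for the target errno value used by user-mode code.

     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
         return -TARGET_EFAULT;
*/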
2102
2103 /* called from signal handler: invalidate the code and unprotect the
2104 page. Return TRUE if the fault was successfully handled. */
2105 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2106 {
2107 unsigned int page_index, prot, pindex;
2108 PageDesc *p, *p1;
2109 target_ulong host_start, host_end, addr;
2110
2111 /* Technically this isn't safe inside a signal handler. However we
2112 know this only ever happens in a synchronous SEGV handler, so in
2113 practice it seems to be ok. */
2114 mmap_lock();
2115
2116 host_start = address & qemu_host_page_mask;
2117 page_index = host_start >> TARGET_PAGE_BITS;
2118 p1 = page_find(page_index);
2119 if (!p1) {
2120 mmap_unlock();
2121 return 0;
2122 }
2123 host_end = host_start + qemu_host_page_size;
2124 p = p1;
2125 prot = 0;
2126 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2127 prot |= p->flags;
2128 p++;
2129 }
2130 /* if the page was really writable, then we change its
2131 protection back to writable */
2132 if (prot & PAGE_WRITE_ORG) {
2133 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2134 if (!(p1[pindex].flags & PAGE_WRITE)) {
2135 mprotect((void *)g2h(host_start), qemu_host_page_size,
2136 (prot & PAGE_BITS) | PAGE_WRITE);
2137 p1[pindex].flags |= PAGE_WRITE;
2138 /* and since the content will be modified, we must invalidate
2139 the corresponding translated code. */
2140 tb_invalidate_phys_page(address, pc, puc);
2141 #ifdef DEBUG_TB_CHECK
2142 tb_invalidate_check(address);
2143 #endif
2144 mmap_unlock();
2145 return 1;
2146 }
2147 }
2148 mmap_unlock();
2149 return 0;
2150 }
2151
2152 static inline void tlb_set_dirty(CPUState *env,
2153 unsigned long addr, target_ulong vaddr)
2154 {
2155 }
2156 #endif /* defined(CONFIG_USER_ONLY) */
2157
2158 #if !defined(CONFIG_USER_ONLY)
2159 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2160 ram_addr_t memory);
2161 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2162 ram_addr_t orig_memory);
2163 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2164 need_subpage) \
2165 do { \
2166 if (addr > start_addr) \
2167 start_addr2 = 0; \
2168 else { \
2169 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2170 if (start_addr2 > 0) \
2171 need_subpage = 1; \
2172 } \
2173 \
2174 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2175 end_addr2 = TARGET_PAGE_SIZE - 1; \
2176 else { \
2177 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2178 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2179 need_subpage = 1; \
2180 } \
2181 } while (0)
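/* Worked example (assuming TARGET_PAGE_SIZE == 0x1000): for a registration
   with start_addr = 0x1100 and orig_size = 0x200, evaluating the macro for
   the page at addr = 0x1000 yields start_addr2 = 0x100 and
   end_addr2 = 0x2ff, and need_subpage is set because the registration
   covers neither edge of that page. */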
2182
2183 /* register physical memory. 'size' must be a multiple of the target
2184 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2185 io memory page */
2186 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2187 ram_addr_t size,
2188 ram_addr_t phys_offset)
2189 {
2190 target_phys_addr_t addr, end_addr;
2191 PhysPageDesc *p;
2192 CPUState *env;
2193 ram_addr_t orig_size = size;
2194 void *subpage;
2195
2196 #ifdef USE_KQEMU
2197 /* XXX: should not depend on cpu context */
2198 env = first_cpu;
2199 if (env->kqemu_enabled) {
2200 kqemu_set_phys_mem(start_addr, size, phys_offset);
2201 }
2202 #endif
2203 if (kvm_enabled())
2204 kvm_set_phys_mem(start_addr, size, phys_offset);
2205
2206 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2207 end_addr = start_addr + (target_phys_addr_t)size;
2208 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2209 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2210 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2211 ram_addr_t orig_memory = p->phys_offset;
2212 target_phys_addr_t start_addr2, end_addr2;
2213 int need_subpage = 0;
2214
2215 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2216 need_subpage);
2217 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2218 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2219 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2220 &p->phys_offset, orig_memory);
2221 } else {
2222 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2223 >> IO_MEM_SHIFT];
2224 }
2225 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2226 } else {
2227 p->phys_offset = phys_offset;
2228 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2229 (phys_offset & IO_MEM_ROMD))
2230 phys_offset += TARGET_PAGE_SIZE;
2231 }
2232 } else {
2233 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2234 p->phys_offset = phys_offset;
2235 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2236 (phys_offset & IO_MEM_ROMD))
2237 phys_offset += TARGET_PAGE_SIZE;
2238 else {
2239 target_phys_addr_t start_addr2, end_addr2;
2240 int need_subpage = 0;
2241
2242 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2243 end_addr2, need_subpage);
2244
2245 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2246 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2247 &p->phys_offset, IO_MEM_UNASSIGNED);
2248 subpage_register(subpage, start_addr2, end_addr2,
2249 phys_offset);
2250 }
2251 }
2252 }
2253 }
2254
2255 /* since each CPU stores ram addresses in its TLB cache, we must
2256 reset the modified entries */
2257 /* XXX: slow ! */
2258 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2259 tlb_flush(env, 1);
2260 }
2261 }
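/* Usage sketch (illustrative only; the addresses, the sizes and the names
   ram_offset/mydev_io_index are assumptions for this example): board code
   usually allocates guest RAM and maps it, and maps MMIO with the value
   returned by cpu_register_io_memory().

     ram_addr_t ram_offset = qemu_ram_alloc(0x00800000);
     cpu_register_physical_memory(0x00000000, 0x00800000,
                                  ram_offset | IO_MEM_RAM);
     cpu_register_physical_memory(0x10000000, 0x1000, mydev_io_index);
*/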
2262
2263 /* XXX: temporary until new memory mapping API */
2264 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2265 {
2266 PhysPageDesc *p;
2267
2268 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2269 if (!p)
2270 return IO_MEM_UNASSIGNED;
2271 return p->phys_offset;
2272 }
2273
2274 /* XXX: better than nothing */
2275 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2276 {
2277 ram_addr_t addr;
2278 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2279 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2280 (uint64_t)size, (uint64_t)phys_ram_size);
2281 abort();
2282 }
2283 addr = phys_ram_alloc_offset;
2284 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2285 return addr;
2286 }
2287
2288 void qemu_ram_free(ram_addr_t addr)
2289 {
2290 }
2291
2292 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2293 {
2294 #ifdef DEBUG_UNASSIGNED
2295 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2296 #endif
2297 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2298 do_unassigned_access(addr, 0, 0, 0, 1);
2299 #endif
2300 return 0;
2301 }
2302
2303 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2304 {
2305 #ifdef DEBUG_UNASSIGNED
2306 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2307 #endif
2308 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2309 do_unassigned_access(addr, 0, 0, 0, 2);
2310 #endif
2311 return 0;
2312 }
2313
2314 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2315 {
2316 #ifdef DEBUG_UNASSIGNED
2317 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2318 #endif
2319 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2320 do_unassigned_access(addr, 0, 0, 0, 4);
2321 #endif
2322 return 0;
2323 }
2324
2325 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2326 {
2327 #ifdef DEBUG_UNASSIGNED
2328 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2329 #endif
2330 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2331 do_unassigned_access(addr, 1, 0, 0, 1);
2332 #endif
2333 }
2334
2335 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2336 {
2337 #ifdef DEBUG_UNASSIGNED
2338 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2339 #endif
2340 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2341 do_unassigned_access(addr, 1, 0, 0, 2);
2342 #endif
2343 }
2344
2345 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2346 {
2347 #ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2349 #endif
2350 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr, 1, 0, 0, 4);
2352 #endif
2353 }
2354
2355 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2356 unassigned_mem_readb,
2357 unassigned_mem_readw,
2358 unassigned_mem_readl,
2359 };
2360
2361 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2362 unassigned_mem_writeb,
2363 unassigned_mem_writew,
2364 unassigned_mem_writel,
2365 };
2366
2367 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2368 uint32_t val)
2369 {
2370 int dirty_flags;
2371 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2372 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2373 #if !defined(CONFIG_USER_ONLY)
2374 tb_invalidate_phys_page_fast(ram_addr, 1);
2375 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2376 #endif
2377 }
2378 stb_p(phys_ram_base + ram_addr, val);
2379 #ifdef USE_KQEMU
2380 if (cpu_single_env->kqemu_enabled &&
2381 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2382 kqemu_modify_page(cpu_single_env, ram_addr);
2383 #endif
2384 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2385 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2386 /* we remove the notdirty callback only if the code has been
2387 flushed */
2388 if (dirty_flags == 0xff)
2389 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2390 }
2391
2392 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2393 uint32_t val)
2394 {
2395 int dirty_flags;
2396 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2397 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2398 #if !defined(CONFIG_USER_ONLY)
2399 tb_invalidate_phys_page_fast(ram_addr, 2);
2400 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2401 #endif
2402 }
2403 stw_p(phys_ram_base + ram_addr, val);
2404 #ifdef USE_KQEMU
2405 if (cpu_single_env->kqemu_enabled &&
2406 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2407 kqemu_modify_page(cpu_single_env, ram_addr);
2408 #endif
2409 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2410 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2411 /* we remove the notdirty callback only if the code has been
2412 flushed */
2413 if (dirty_flags == 0xff)
2414 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2415 }
2416
2417 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2418 uint32_t val)
2419 {
2420 int dirty_flags;
2421 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2422 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2423 #if !defined(CONFIG_USER_ONLY)
2424 tb_invalidate_phys_page_fast(ram_addr, 4);
2425 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2426 #endif
2427 }
2428 stl_p(phys_ram_base + ram_addr, val);
2429 #ifdef USE_KQEMU
2430 if (cpu_single_env->kqemu_enabled &&
2431 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2432 kqemu_modify_page(cpu_single_env, ram_addr);
2433 #endif
2434 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2435 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2436 /* we remove the notdirty callback only if the code has been
2437 flushed */
2438 if (dirty_flags == 0xff)
2439 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2440 }
2441
2442 static CPUReadMemoryFunc *error_mem_read[3] = {
2443 NULL, /* never used */
2444 NULL, /* never used */
2445 NULL, /* never used */
2446 };
2447
2448 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2449 notdirty_mem_writeb,
2450 notdirty_mem_writew,
2451 notdirty_mem_writel,
2452 };
2453
2454 /* Generate a debug exception if a watchpoint has been hit. */
2455 static void check_watchpoint(int offset, int flags)
2456 {
2457 CPUState *env = cpu_single_env;
2458 target_ulong vaddr;
2459 int i;
2460
2461 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2462 for (i = 0; i < env->nb_watchpoints; i++) {
2463 if (vaddr == env->watchpoint[i].vaddr
2464 && (env->watchpoint[i].type & flags)) {
2465 env->watchpoint_hit = i + 1;
2466 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2467 break;
2468 }
2469 }
2470 }
2471
2472 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2473 so these check for a hit then pass through to the normal out-of-line
2474 phys routines. */
2475 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2476 {
2477 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2478 return ldub_phys(addr);
2479 }
2480
2481 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2482 {
2483 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2484 return lduw_phys(addr);
2485 }
2486
2487 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2488 {
2489 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2490 return ldl_phys(addr);
2491 }
2492
2493 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2494 uint32_t val)
2495 {
2496 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2497 stb_phys(addr, val);
2498 }
2499
2500 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2501 uint32_t val)
2502 {
2503 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2504 stw_phys(addr, val);
2505 }
2506
2507 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2508 uint32_t val)
2509 {
2510 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2511 stl_phys(addr, val);
2512 }
2513
2514 static CPUReadMemoryFunc *watch_mem_read[3] = {
2515 watch_mem_readb,
2516 watch_mem_readw,
2517 watch_mem_readl,
2518 };
2519
2520 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2521 watch_mem_writeb,
2522 watch_mem_writew,
2523 watch_mem_writel,
2524 };
2525
2526 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2527 unsigned int len)
2528 {
2529 uint32_t ret;
2530 unsigned int idx;
2531
2532 idx = SUBPAGE_IDX(addr - mmio->base);
2533 #if defined(DEBUG_SUBPAGE)
2534 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2535 mmio, len, addr, idx);
2536 #endif
2537 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2538
2539 return ret;
2540 }
2541
2542 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2543 uint32_t value, unsigned int len)
2544 {
2545 unsigned int idx;
2546
2547 idx = SUBPAGE_IDX(addr - mmio->base);
2548 #if defined(DEBUG_SUBPAGE)
2549 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2550 mmio, len, addr, idx, value);
2551 #endif
2552 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2553 }
2554
2555 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2556 {
2557 #if defined(DEBUG_SUBPAGE)
2558 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2559 #endif
2560
2561 return subpage_readlen(opaque, addr, 0);
2562 }
2563
2564 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2565 uint32_t value)
2566 {
2567 #if defined(DEBUG_SUBPAGE)
2568 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2569 #endif
2570 subpage_writelen(opaque, addr, value, 0);
2571 }
2572
2573 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2574 {
2575 #if defined(DEBUG_SUBPAGE)
2576 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2577 #endif
2578
2579 return subpage_readlen(opaque, addr, 1);
2580 }
2581
2582 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2583 uint32_t value)
2584 {
2585 #if defined(DEBUG_SUBPAGE)
2586 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2587 #endif
2588 subpage_writelen(opaque, addr, value, 1);
2589 }
2590
2591 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2592 {
2593 #if defined(DEBUG_SUBPAGE)
2594 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2595 #endif
2596
2597 return subpage_readlen(opaque, addr, 2);
2598 }
2599
2600 static void subpage_writel (void *opaque,
2601 target_phys_addr_t addr, uint32_t value)
2602 {
2603 #if defined(DEBUG_SUBPAGE)
2604 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2605 #endif
2606 subpage_writelen(opaque, addr, value, 2);
2607 }
2608
2609 static CPUReadMemoryFunc *subpage_read[] = {
2610 &subpage_readb,
2611 &subpage_readw,
2612 &subpage_readl,
2613 };
2614
2615 static CPUWriteMemoryFunc *subpage_write[] = {
2616 &subpage_writeb,
2617 &subpage_writew,
2618 &subpage_writel,
2619 };
2620
2621 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2622 ram_addr_t memory)
2623 {
2624 int idx, eidx;
2625 unsigned int i;
2626
2627 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2628 return -1;
2629 idx = SUBPAGE_IDX(start);
2630 eidx = SUBPAGE_IDX(end);
2631 #if defined(DEBUG_SUBPAGE)
2632 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2633 mmio, start, end, idx, eidx, memory);
2634 #endif
2635 memory >>= IO_MEM_SHIFT;
2636 for (; idx <= eidx; idx++) {
2637 for (i = 0; i < 4; i++) {
2638 if (io_mem_read[memory][i]) {
2639 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2640 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2641 }
2642 if (io_mem_write[memory][i]) {
2643 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2644 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2645 }
2646 }
2647 }
2648
2649 return 0;
2650 }
2651
2652 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2653 ram_addr_t orig_memory)
2654 {
2655 subpage_t *mmio;
2656 int subpage_memory;
2657
2658 mmio = qemu_mallocz(sizeof(subpage_t));
2659 if (mmio != NULL) {
2660 mmio->base = base;
2661 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2662 #if defined(DEBUG_SUBPAGE)
2663 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2664 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2665 #endif
2666 *phys = subpage_memory | IO_MEM_SUBPAGE;
2667 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2668 }
2669
2670 return mmio;
2671 }
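/* Illustrative note: callers never create subpages directly;
   cpu_register_physical_memory() does it above whenever a registration
   covers only part of a target page.  For example (hypothetical devices,
   4 KB pages), mapping one device at 0x10000000 and another at 0x10000800,
   0x800 bytes each, makes both share a single subpage_t for that page. */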
2672
2673 static void io_mem_init(void)
2674 {
2675 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2676 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2677 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2678 io_mem_nb = 5;
2679
2680 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2681 watch_mem_write, NULL);
2682 /* alloc dirty bits array */
2683 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2684 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2685 }
2686
2687 /* mem_read and mem_write are arrays of functions containing the
2688 function to access byte (index 0), word (index 1) and dword (index
2689 2). Functions can be omitted with a NULL function pointer. The
2690 registered functions may be modified dynamically later.
2691 If io_index is non-zero, the corresponding io zone is
2692 modified. If it is zero, a new io zone is allocated. The return
2693 value can be used with cpu_register_physical_memory(). (-1) is
2694 returned on error. */
2695 int cpu_register_io_memory(int io_index,
2696 CPUReadMemoryFunc **mem_read,
2697 CPUWriteMemoryFunc **mem_write,
2698 void *opaque)
2699 {
2700 int i, subwidth = 0;
2701
2702 if (io_index <= 0) {
2703 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2704 return -1;
2705 io_index = io_mem_nb++;
2706 } else {
2707 if (io_index >= IO_MEM_NB_ENTRIES)
2708 return -1;
2709 }
2710
2711 for(i = 0;i < 3; i++) {
2712 if (!mem_read[i] || !mem_write[i])
2713 subwidth = IO_MEM_SUBWIDTH;
2714 io_mem_read[io_index][i] = mem_read[i];
2715 io_mem_write[io_index][i] = mem_write[i];
2716 }
2717 io_mem_opaque[io_index] = opaque;
2718 return (io_index << IO_MEM_SHIFT) | subwidth;
2719 }
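/* Usage sketch (illustrative only): a device provides byte/word/long
   handlers and registers them; the mydev_* names, the opaque pointer s and
   the base address are assumptions for this example.

     static CPUReadMemoryFunc *mydev_read[3] = {
         mydev_readb, mydev_readw, mydev_readl,
     };
     static CPUWriteMemoryFunc *mydev_write[3] = {
         mydev_writeb, mydev_writew, mydev_writel,
     };

     int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, s);
     cpu_register_physical_memory(base, 0x1000, io_index);
*/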
2720
2721 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2722 {
2723 return io_mem_write[io_index >> IO_MEM_SHIFT];
2724 }
2725
2726 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2727 {
2728 return io_mem_read[io_index >> IO_MEM_SHIFT];
2729 }
2730
2731 #endif /* !defined(CONFIG_USER_ONLY) */
2732
2733 /* physical memory access (slow version, mainly for debug) */
2734 #if defined(CONFIG_USER_ONLY)
2735 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2736 int len, int is_write)
2737 {
2738 int l, flags;
2739 target_ulong page;
2740 void * p;
2741
2742 while (len > 0) {
2743 page = addr & TARGET_PAGE_MASK;
2744 l = (page + TARGET_PAGE_SIZE) - addr;
2745 if (l > len)
2746 l = len;
2747 flags = page_get_flags(page);
2748 if (!(flags & PAGE_VALID))
2749 return;
2750 if (is_write) {
2751 if (!(flags & PAGE_WRITE))
2752 return;
2753 /* XXX: this code should not depend on lock_user */
2754 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2755 /* FIXME - should this return an error rather than just fail? */
2756 return;
2757 memcpy(p, buf, l);
2758 unlock_user(p, addr, l);
2759 } else {
2760 if (!(flags & PAGE_READ))
2761 return;
2762 /* XXX: this code should not depend on lock_user */
2763 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2764 /* FIXME - should this return an error rather than just fail? */
2765 return;
2766 memcpy(buf, p, l);
2767 unlock_user(p, addr, 0);
2768 }
2769 len -= l;
2770 buf += l;
2771 addr += l;
2772 }
2773 }
2774
2775 #else
2776 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2777 int len, int is_write)
2778 {
2779 int l, io_index;
2780 uint8_t *ptr;
2781 uint32_t val;
2782 target_phys_addr_t page;
2783 unsigned long pd;
2784 PhysPageDesc *p;
2785
2786 while (len > 0) {
2787 page = addr & TARGET_PAGE_MASK;
2788 l = (page + TARGET_PAGE_SIZE) - addr;
2789 if (l > len)
2790 l = len;
2791 p = phys_page_find(page >> TARGET_PAGE_BITS);
2792 if (!p) {
2793 pd = IO_MEM_UNASSIGNED;
2794 } else {
2795 pd = p->phys_offset;
2796 }
2797
2798 if (is_write) {
2799 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2800 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2801 /* XXX: could force cpu_single_env to NULL to avoid
2802 potential bugs */
2803 if (l >= 4 && ((addr & 3) == 0)) {
2804 /* 32 bit write access */
2805 val = ldl_p(buf);
2806 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2807 l = 4;
2808 } else if (l >= 2 && ((addr & 1) == 0)) {
2809 /* 16 bit write access */
2810 val = lduw_p(buf);
2811 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2812 l = 2;
2813 } else {
2814 /* 8 bit write access */
2815 val = ldub_p(buf);
2816 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2817 l = 1;
2818 }
2819 } else {
2820 unsigned long addr1;
2821 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2822 /* RAM case */
2823 ptr = phys_ram_base + addr1;
2824 memcpy(ptr, buf, l);
2825 if (!cpu_physical_memory_is_dirty(addr1)) {
2826 /* invalidate code */
2827 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2828 /* set dirty bit */
2829 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2830 (0xff & ~CODE_DIRTY_FLAG);
2831 }
2832 }
2833 } else {
2834 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2835 !(pd & IO_MEM_ROMD)) {
2836 /* I/O case */
2837 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2838 if (l >= 4 && ((addr & 3) == 0)) {
2839 /* 32 bit read access */
2840 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2841 stl_p(buf, val);
2842 l = 4;
2843 } else if (l >= 2 && ((addr & 1) == 0)) {
2844 /* 16 bit read access */
2845 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2846 stw_p(buf, val);
2847 l = 2;
2848 } else {
2849 /* 8 bit read access */
2850 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2851 stb_p(buf, val);
2852 l = 1;
2853 }
2854 } else {
2855 /* RAM case */
2856 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2857 (addr & ~TARGET_PAGE_MASK);
2858 memcpy(buf, ptr, l);
2859 }
2860 }
2861 len -= l;
2862 buf += l;
2863 addr += l;
2864 }
2865 }
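/* Usage sketch (illustrative only): device emulation normally goes through
   the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, e.g.
   fetching a hypothetical 16-byte descriptor at guest physical desc_paddr:

     uint8_t desc[16];
     cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
*/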
2866
2867 /* used for ROM loading: can write in RAM and ROM */
2868 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2869 const uint8_t *buf, int len)
2870 {
2871 int l;
2872 uint8_t *ptr;
2873 target_phys_addr_t page;
2874 unsigned long pd;
2875 PhysPageDesc *p;
2876
2877 while (len > 0) {
2878 page = addr & TARGET_PAGE_MASK;
2879 l = (page + TARGET_PAGE_SIZE) - addr;
2880 if (l > len)
2881 l = len;
2882 p = phys_page_find(page >> TARGET_PAGE_BITS);
2883 if (!p) {
2884 pd = IO_MEM_UNASSIGNED;
2885 } else {
2886 pd = p->phys_offset;
2887 }
2888
2889 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2890 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2891 !(pd & IO_MEM_ROMD)) {
2892 /* do nothing */
2893 } else {
2894 unsigned long addr1;
2895 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2896 /* ROM/RAM case */
2897 ptr = phys_ram_base + addr1;
2898 memcpy(ptr, buf, l);
2899 }
2900 len -= l;
2901 buf += l;
2902 addr += l;
2903 }
2904 }
2905
2906
2907 /* warning: addr must be aligned */
2908 uint32_t ldl_phys(target_phys_addr_t addr)
2909 {
2910 int io_index;
2911 uint8_t *ptr;
2912 uint32_t val;
2913 unsigned long pd;
2914 PhysPageDesc *p;
2915
2916 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2917 if (!p) {
2918 pd = IO_MEM_UNASSIGNED;
2919 } else {
2920 pd = p->phys_offset;
2921 }
2922
2923 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2924 !(pd & IO_MEM_ROMD)) {
2925 /* I/O case */
2926 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2927 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2928 } else {
2929 /* RAM case */
2930 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2931 (addr & ~TARGET_PAGE_MASK);
2932 val = ldl_p(ptr);
2933 }
2934 return val;
2935 }
2936
2937 /* warning: addr must be aligned */
2938 uint64_t ldq_phys(target_phys_addr_t addr)
2939 {
2940 int io_index;
2941 uint8_t *ptr;
2942 uint64_t val;
2943 unsigned long pd;
2944 PhysPageDesc *p;
2945
2946 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2947 if (!p) {
2948 pd = IO_MEM_UNASSIGNED;
2949 } else {
2950 pd = p->phys_offset;
2951 }
2952
2953 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2954 !(pd & IO_MEM_ROMD)) {
2955 /* I/O case */
2956 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2957 #ifdef TARGET_WORDS_BIGENDIAN
2958 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2959 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2960 #else
2961 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2962 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2963 #endif
2964 } else {
2965 /* RAM case */
2966 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2967 (addr & ~TARGET_PAGE_MASK);
2968 val = ldq_p(ptr);
2969 }
2970 return val;
2971 }
2972
2973 /* XXX: optimize */
2974 uint32_t ldub_phys(target_phys_addr_t addr)
2975 {
2976 uint8_t val;
2977 cpu_physical_memory_read(addr, &val, 1);
2978 return val;
2979 }
2980
2981 /* XXX: optimize */
2982 uint32_t lduw_phys(target_phys_addr_t addr)
2983 {
2984 uint16_t val;
2985 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2986 return tswap16(val);
2987 }
2988
2989 /* warning: addr must be aligned. The ram page is not marked as dirty
2990 and the code inside is not invalidated. It is useful if the dirty
2991 bits are used to track modified PTEs */
2992 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2993 {
2994 int io_index;
2995 uint8_t *ptr;
2996 unsigned long pd;
2997 PhysPageDesc *p;
2998
2999 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3000 if (!p) {
3001 pd = IO_MEM_UNASSIGNED;
3002 } else {
3003 pd = p->phys_offset;
3004 }
3005
3006 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3007 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3008 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3009 } else {
3010 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3011 ptr = phys_ram_base + addr1;
3012 stl_p(ptr, val);
3013
3014 if (unlikely(in_migration)) {
3015 if (!cpu_physical_memory_is_dirty(addr1)) {
3016 /* invalidate code */
3017 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3018 /* set dirty bit */
3019 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3020 (0xff & ~CODE_DIRTY_FLAG);
3021 }
3022 }
3023 }
3024 }
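/* Usage sketch (illustrative only): target MMU emulation that sets the
   accessed/dirty bits in a guest page-table entry can write the PTE back
   this way so the page is not flagged as modified; pte, pte_addr and
   PG_ACCESSED_MASK are assumptions borrowed from an x86-style layout.

     pte |= PG_ACCESSED_MASK;
     stl_phys_notdirty(pte_addr, pte);
*/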
3025
3026 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3027 {
3028 int io_index;
3029 uint8_t *ptr;
3030 unsigned long pd;
3031 PhysPageDesc *p;
3032
3033 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3034 if (!p) {
3035 pd = IO_MEM_UNASSIGNED;
3036 } else {
3037 pd = p->phys_offset;
3038 }
3039
3040 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3041 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3042 #ifdef TARGET_WORDS_BIGENDIAN
3043 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3044 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3045 #else
3046 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3047 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3048 #endif
3049 } else {
3050 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3051 (addr & ~TARGET_PAGE_MASK);
3052 stq_p(ptr, val);
3053 }
3054 }
3055
3056 /* warning: addr must be aligned */
3057 void stl_phys(target_phys_addr_t addr, uint32_t val)
3058 {
3059 int io_index;
3060 uint8_t *ptr;
3061 unsigned long pd;
3062 PhysPageDesc *p;
3063
3064 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3065 if (!p) {
3066 pd = IO_MEM_UNASSIGNED;
3067 } else {
3068 pd = p->phys_offset;
3069 }
3070
3071 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3072 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3073 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3074 } else {
3075 unsigned long addr1;
3076 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3077 /* RAM case */
3078 ptr = phys_ram_base + addr1;
3079 stl_p(ptr, val);
3080 if (!cpu_physical_memory_is_dirty(addr1)) {
3081 /* invalidate code */
3082 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3083 /* set dirty bit */
3084 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3085 (0xff & ~CODE_DIRTY_FLAG);
3086 }
3087 }
3088 }
3089
3090 /* XXX: optimize */
3091 void stb_phys(target_phys_addr_t addr, uint32_t val)
3092 {
3093 uint8_t v = val;
3094 cpu_physical_memory_write(addr, &v, 1);
3095 }
3096
3097 /* XXX: optimize */
3098 void stw_phys(target_phys_addr_t addr, uint32_t val)
3099 {
3100 uint16_t v = tswap16(val);
3101 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3102 }
3103
3104 /* XXX: optimize */
3105 void stq_phys(target_phys_addr_t addr, uint64_t val)
3106 {
3107 val = tswap64(val);
3108 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3109 }
3110
3111 #endif
3112
3113 /* virtual memory access for debug */
3114 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3115 uint8_t *buf, int len, int is_write)
3116 {
3117 int l;
3118 target_phys_addr_t phys_addr;
3119 target_ulong page;
3120
3121 while (len > 0) {
3122 page = addr & TARGET_PAGE_MASK;
3123 phys_addr = cpu_get_phys_page_debug(env, page);
3124 /* if no physical page mapped, return an error */
3125 if (phys_addr == -1)
3126 return -1;
3127 l = (page + TARGET_PAGE_SIZE) - addr;
3128 if (l > len)
3129 l = len;
3130 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3131 buf, l, is_write);
3132 len -= l;
3133 buf += l;
3134 addr += l;
3135 }
3136 return 0;
3137 }
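/* Usage sketch (illustrative only): the gdb stub and the monitor use this
   to read guest virtual memory, e.g. peeking at the instruction at an
   assumed guest address pc:

     uint32_t insn;
     if (cpu_memory_rw_debug(env, pc, (uint8_t *)&insn, sizeof(insn), 0) < 0)
         return;            (no page mapped at pc)
*/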
3138
3139 /* in deterministic execution mode, instructions doing device I/Os
3140 must be at the end of the TB */
3141 void cpu_io_recompile(CPUState *env, void *retaddr)
3142 {
3143 TranslationBlock *tb;
3144 uint32_t n, cflags;
3145 target_ulong pc, cs_base;
3146 uint64_t flags;
3147
3148 tb = tb_find_pc((unsigned long)retaddr);
3149 if (!tb) {
3150 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3151 retaddr);
3152 }
3153 n = env->icount_decr.u16.low + tb->icount;
3154 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3155 /* Calculate how many instructions had been executed before the fault
3156 occurred. */
3157 n = n - env->icount_decr.u16.low;
3158 /* Generate a new TB ending on the I/O insn. */
3159 n++;
3160 /* On MIPS and SH, delay slot instructions can only be restarted if
3161 they were already the first instruction in the TB. If this is not
3162 the first instruction in a TB then re-execute the preceding
3163 branch. */
3164 #if defined(TARGET_MIPS)
3165 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3166 env->active_tc.PC -= 4;
3167 env->icount_decr.u16.low++;
3168 env->hflags &= ~MIPS_HFLAG_BMASK;
3169 }
3170 #elif defined(TARGET_SH4)
3171 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3172 && n > 1) {
3173 env->pc -= 2;
3174 env->icount_decr.u16.low++;
3175 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3176 }
3177 #endif
3178 /* This should never happen. */
3179 if (n > CF_COUNT_MASK)
3180 cpu_abort(env, "TB too big during recompile");
3181
3182 cflags = n | CF_LAST_IO;
3183 pc = tb->pc;
3184 cs_base = tb->cs_base;
3185 flags = tb->flags;
3186 tb_phys_invalidate(tb, -1);
3187 /* FIXME: In theory this could raise an exception. In practice
3188 we have already translated the block once so it's probably ok. */
3189 tb_gen_code(env, pc, cs_base, flags, cflags);
3190 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3191 the first in the TB) then we end up generating a whole new TB and
3192 repeating the fault, which is horribly inefficient.
3193 Better would be to execute just this insn uncached, or generate a
3194 second new TB. */
3195 cpu_resume_from_signal(env, NULL);
3196 }
3197
3198 void dump_exec_info(FILE *f,
3199 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3200 {
3201 int i, target_code_size, max_target_code_size;
3202 int direct_jmp_count, direct_jmp2_count, cross_page;
3203 TranslationBlock *tb;
3204
3205 target_code_size = 0;
3206 max_target_code_size = 0;
3207 cross_page = 0;
3208 direct_jmp_count = 0;
3209 direct_jmp2_count = 0;
3210 for(i = 0; i < nb_tbs; i++) {
3211 tb = &tbs[i];
3212 target_code_size += tb->size;
3213 if (tb->size > max_target_code_size)
3214 max_target_code_size = tb->size;
3215 if (tb->page_addr[1] != -1)
3216 cross_page++;
3217 if (tb->tb_next_offset[0] != 0xffff) {
3218 direct_jmp_count++;
3219 if (tb->tb_next_offset[1] != 0xffff) {
3220 direct_jmp2_count++;
3221 }
3222 }
3223 }
3224 /* XXX: avoid using doubles ? */
3225 cpu_fprintf(f, "Translation buffer state:\n");
3226 cpu_fprintf(f, "gen code size %ld/%ld\n",
3227 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3228 cpu_fprintf(f, "TB count %d/%d\n",
3229 nb_tbs, code_gen_max_blocks);
3230 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3231 nb_tbs ? target_code_size / nb_tbs : 0,
3232 max_target_code_size);
3233 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3234 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3235 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3236 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3237 cross_page,
3238 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3239 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3240 direct_jmp_count,
3241 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3242 direct_jmp2_count,
3243 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3244 cpu_fprintf(f, "\nStatistics:\n");
3245 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3246 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3247 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3248 tcg_dump_info(f, cpu_fprintf);
3249 }
3250
3251 #if !defined(CONFIG_USER_ONLY)
3252
3253 #define MMUSUFFIX _cmmu
3254 #define GETPC() NULL
3255 #define env cpu_single_env
3256 #define SOFTMMU_CODE_ACCESS
3257
3258 #define SHIFT 0
3259 #include "softmmu_template.h"
3260
3261 #define SHIFT 1
3262 #include "softmmu_template.h"
3263
3264 #define SHIFT 2
3265 #include "softmmu_template.h"
3266
3267 #define SHIFT 3
3268 #include "softmmu_template.h"
3269
3270 #undef env
3271
3272 #endif