[qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
33
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #include "hw/hw.h"
39 #include "osdep.h"
40 #include "kvm.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #include <signal.h>
44 #endif
45
46 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_FLUSH
48 //#define DEBUG_TLB
49 //#define DEBUG_UNASSIGNED
50
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
54
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
57
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
60 #undef DEBUG_TB_CHECK
61 #endif
62
63 #define SMC_BITMAP_USE_THRESHOLD 10
64
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #else
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
80 #endif
81
82 static TranslationBlock *tbs;
83 int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92    section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 uint8_t *phys_ram_dirty;
115 static int in_migration;
116
117 typedef struct RAMBlock {
118 uint8_t *host;
119 ram_addr_t offset;
120 ram_addr_t length;
121 struct RAMBlock *next;
122 } RAMBlock;
123
124 static RAMBlock *ram_blocks;
125 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126 then we can no longer assume contiguous ram offsets, and external uses
127 of this variable will break. */
128 ram_addr_t last_ram_offset;
129 #endif
130
131 CPUState *first_cpu;
132 /* current CPU in the current thread. It is only valid inside
133 cpu_exec() */
134 CPUState *cpu_single_env;
135 /* 0 = Do not count executed instructions.
136 1 = Precise instruction counting.
137 2 = Adaptive rate instruction counting. */
138 int use_icount = 0;
139 /* Current instruction counter. While executing translated code this may
140 include some instructions that have not yet been executed. */
141 int64_t qemu_icount;
142
143 typedef struct PageDesc {
144 /* list of TBs intersecting this ram page */
145 TranslationBlock *first_tb;
146 /* in order to optimize self modifying code, we count the number
147 of lookups we do to a given page to use a bitmap */
148 unsigned int code_write_count;
149 uint8_t *code_bitmap;
150 #if defined(CONFIG_USER_ONLY)
151 unsigned long flags;
152 #endif
153 } PageDesc;
154
155 typedef struct PhysPageDesc {
156 /* offset in host memory of the page + io_index in the low bits */
157 ram_addr_t phys_offset;
158 ram_addr_t region_offset;
159 } PhysPageDesc;
160
161 #define L2_BITS 10
162 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163 /* XXX: this is a temporary hack for alpha target.
164 * In the future, this is to be replaced by a multi-level table
165  * to actually be able to handle the complete 64-bit address space.
166 */
167 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168 #else
169 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170 #endif
171
172 #define L1_SIZE (1 << L1_BITS)
173 #define L2_SIZE (1 << L2_BITS)
174
175 unsigned long qemu_real_host_page_size;
176 unsigned long qemu_host_page_bits;
177 unsigned long qemu_host_page_size;
178 unsigned long qemu_host_page_mask;
179
180 /* XXX: for system emulation, it could just be an array */
181 static PageDesc *l1_map[L1_SIZE];
182
183 #if !defined(CONFIG_USER_ONLY)
184 static PhysPageDesc **l1_phys_map;
185
186 static void io_mem_init(void);
187
188 /* io memory support */
189 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
190 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
191 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
192 static char io_mem_used[IO_MEM_NB_ENTRIES];
193 static int io_mem_watch;
194 #endif
195
196 /* log support */
197 #ifdef WIN32
198 static const char *logfilename = "qemu.log";
199 #else
200 static const char *logfilename = "/tmp/qemu.log";
201 #endif
202 FILE *logfile;
203 int loglevel;
204 static int log_append = 0;
205
206 /* statistics */
207 static int tlb_flush_count;
208 static int tb_flush_count;
209 static int tb_phys_invalidate_count;
210
211 #ifdef _WIN32
212 static void map_exec(void *addr, long size)
213 {
214 DWORD old_protect;
215 VirtualProtect(addr, size,
216 PAGE_EXECUTE_READWRITE, &old_protect);
217
218 }
219 #else
220 static void map_exec(void *addr, long size)
221 {
222 unsigned long start, end, page_size;
223
224 page_size = getpagesize();
225 start = (unsigned long)addr;
226 start &= ~(page_size - 1);
227
228 end = (unsigned long)addr + size;
229 end += page_size - 1;
230 end &= ~(page_size - 1);
231
232 mprotect((void *)start, end - start,
233 PROT_READ | PROT_WRITE | PROT_EXEC);
234 }
235 #endif
236
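/* Determine the host page size/bits/mask, allocate and zero the physical
   page table for system emulation, and, for user-mode emulation on POSIX
   hosts, mark every range already mapped by the host (per /proc/self/maps)
   as PAGE_RESERVED so the guest cannot reuse it. */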
237 static void page_init(void)
238 {
239     /* NOTE: we can always assume that qemu_host_page_size >=
240 TARGET_PAGE_SIZE */
241 #ifdef _WIN32
242 {
243 SYSTEM_INFO system_info;
244
245 GetSystemInfo(&system_info);
246 qemu_real_host_page_size = system_info.dwPageSize;
247 }
248 #else
249 qemu_real_host_page_size = getpagesize();
250 #endif
251 if (qemu_host_page_size == 0)
252 qemu_host_page_size = qemu_real_host_page_size;
253 if (qemu_host_page_size < TARGET_PAGE_SIZE)
254 qemu_host_page_size = TARGET_PAGE_SIZE;
255 qemu_host_page_bits = 0;
256 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
257 qemu_host_page_bits++;
258 qemu_host_page_mask = ~(qemu_host_page_size - 1);
259 #if !defined(CONFIG_USER_ONLY)
260 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 #endif
263
264 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 {
266 long long startaddr, endaddr;
267 FILE *f;
268 int n;
269
270 mmap_lock();
271 last_brk = (unsigned long)sbrk(0);
272 f = fopen("/proc/self/maps", "r");
273 if (f) {
274 do {
275 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
276 if (n == 2) {
277 startaddr = MIN(startaddr,
278 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
279 endaddr = MIN(endaddr,
280 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
281 page_set_flags(startaddr & TARGET_PAGE_MASK,
282 TARGET_PAGE_ALIGN(endaddr),
283 PAGE_RESERVED);
284 }
285 } while (!feof(f));
286 fclose(f);
287 }
288 mmap_unlock();
289 }
290 #endif
291 }
292
293 static inline PageDesc **page_l1_map(target_ulong index)
294 {
295 #if TARGET_LONG_BITS > 32
296 /* Host memory outside guest VM. For 32-bit targets we have already
297 excluded high addresses. */
298 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
299 return NULL;
300 #endif
301 return &l1_map[index >> L2_BITS];
302 }
303
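/* Return the PageDesc for the given target page index, allocating the
   second-level table on demand (via mmap in user mode so that qemu_malloc
   cannot recurse). */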
304 static inline PageDesc *page_find_alloc(target_ulong index)
305 {
306 PageDesc **lp, *p;
307 lp = page_l1_map(index);
308 if (!lp)
309 return NULL;
310
311 p = *lp;
312 if (!p) {
313 /* allocate if not found */
314 #if defined(CONFIG_USER_ONLY)
315 size_t len = sizeof(PageDesc) * L2_SIZE;
316 /* Don't use qemu_malloc because it may recurse. */
317 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
318 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
319 *lp = p;
320 if (h2g_valid(p)) {
321 unsigned long addr = h2g(p);
322 page_set_flags(addr & TARGET_PAGE_MASK,
323 TARGET_PAGE_ALIGN(addr + len),
324 PAGE_RESERVED);
325 }
326 #else
327 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
328 *lp = p;
329 #endif
330 }
331 return p + (index & (L2_SIZE - 1));
332 }
333
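/* Like page_find_alloc(), but return NULL instead of allocating when the
   page has no descriptor yet. */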
334 static inline PageDesc *page_find(target_ulong index)
335 {
336 PageDesc **lp, *p;
337 lp = page_l1_map(index);
338 if (!lp)
339 return NULL;
340
341 p = *lp;
342 if (!p) {
343 return NULL;
344 }
345 return p + (index & (L2_SIZE - 1));
346 }
347
348 #if !defined(CONFIG_USER_ONLY)
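/* Look up (and optionally allocate) the PhysPageDesc for a physical page
   index; newly allocated entries start out as IO_MEM_UNASSIGNED with their
   region_offset preset. */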
349 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350 {
351 void **lp, **p;
352 PhysPageDesc *pd;
353
354 p = (void **)l1_phys_map;
355 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
356
357 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359 #endif
360 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361 p = *lp;
362 if (!p) {
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
366 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367 memset(p, 0, sizeof(void *) * L1_SIZE);
368 *lp = p;
369 }
370 #endif
371 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372 pd = *lp;
373 if (!pd) {
374 int i;
375 /* allocate if not found */
376 if (!alloc)
377 return NULL;
378 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379 *lp = pd;
380 for (i = 0; i < L2_SIZE; i++) {
381 pd[i].phys_offset = IO_MEM_UNASSIGNED;
382 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
383 }
384 }
385 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386 }
387
388 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
389 {
390 return phys_page_find_alloc(index, 0);
391 }
392
393 static void tlb_protect_code(ram_addr_t ram_addr);
394 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
395 target_ulong vaddr);
396 #define mmap_lock() do { } while(0)
397 #define mmap_unlock() do { } while(0)
398 #endif
399
400 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
401
402 #if defined(CONFIG_USER_ONLY)
403 /* Currently it is not recommended to allocate big chunks of data in
404    user mode. This will change when a dedicated libc is used. */
405 #define USE_STATIC_CODE_GEN_BUFFER
406 #endif
407
408 #ifdef USE_STATIC_CODE_GEN_BUFFER
409 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
410 #endif
411
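/* Allocate the translated-code buffer: either the static buffer (user mode)
   or a region sized from 'tb_size' or ram_size, placed within the host's
   direct-branch range where that matters, and marked executable. Also sizes
   the TranslationBlock array accordingly. */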
412 static void code_gen_alloc(unsigned long tb_size)
413 {
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 code_gen_buffer = static_code_gen_buffer;
416 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
417 map_exec(code_gen_buffer, code_gen_buffer_size);
418 #else
419 code_gen_buffer_size = tb_size;
420 if (code_gen_buffer_size == 0) {
421 #if defined(CONFIG_USER_ONLY)
422 /* in user mode, phys_ram_size is not meaningful */
423 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
424 #else
425 /* XXX: needs adjustments */
426 code_gen_buffer_size = (unsigned long)(ram_size / 4);
427 #endif
428 }
429 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
430 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
431 /* The code gen buffer location may have constraints depending on
432 the host cpu and OS */
433 #if defined(__linux__)
434 {
435 int flags;
436 void *start = NULL;
437
438 flags = MAP_PRIVATE | MAP_ANONYMOUS;
439 #if defined(__x86_64__)
440 flags |= MAP_32BIT;
441 /* Cannot map more than that */
442 if (code_gen_buffer_size > (800 * 1024 * 1024))
443 code_gen_buffer_size = (800 * 1024 * 1024);
444 #elif defined(__sparc_v9__)
445         /* Map the buffer below 2G, so we can use direct calls and branches */
446 flags |= MAP_FIXED;
447 start = (void *) 0x60000000UL;
448 if (code_gen_buffer_size > (512 * 1024 * 1024))
449 code_gen_buffer_size = (512 * 1024 * 1024);
450 #elif defined(__arm__)
451 /* Map the buffer below 32M, so we can use direct calls and branches */
452 flags |= MAP_FIXED;
453 start = (void *) 0x01000000UL;
454 if (code_gen_buffer_size > 16 * 1024 * 1024)
455 code_gen_buffer_size = 16 * 1024 * 1024;
456 #endif
457 code_gen_buffer = mmap(start, code_gen_buffer_size,
458 PROT_WRITE | PROT_READ | PROT_EXEC,
459 flags, -1, 0);
460 if (code_gen_buffer == MAP_FAILED) {
461 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
462 exit(1);
463 }
464 }
465 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
466 {
467 int flags;
468 void *addr = NULL;
469 flags = MAP_PRIVATE | MAP_ANONYMOUS;
470 #if defined(__x86_64__)
471 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
472 * 0x40000000 is free */
473 flags |= MAP_FIXED;
474 addr = (void *)0x40000000;
475 /* Cannot map more than that */
476 if (code_gen_buffer_size > (800 * 1024 * 1024))
477 code_gen_buffer_size = (800 * 1024 * 1024);
478 #endif
479 code_gen_buffer = mmap(addr, code_gen_buffer_size,
480 PROT_WRITE | PROT_READ | PROT_EXEC,
481 flags, -1, 0);
482 if (code_gen_buffer == MAP_FAILED) {
483 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
484 exit(1);
485 }
486 }
487 #else
488 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
489 map_exec(code_gen_buffer, code_gen_buffer_size);
490 #endif
491 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
492 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
493 code_gen_buffer_max_size = code_gen_buffer_size -
494 code_gen_max_block_size();
495 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
496 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
497 }
498
499 /* Must be called before using the QEMU cpus. 'tb_size' is the size
500 (in bytes) allocated to the translation buffer. Zero means default
501 size. */
502 void cpu_exec_init_all(unsigned long tb_size)
503 {
504 cpu_gen_init();
505 code_gen_alloc(tb_size);
506 code_gen_ptr = code_gen_buffer;
507 page_init();
508 #if !defined(CONFIG_USER_ONLY)
509 io_mem_init();
510 #endif
511 }
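/* Illustrative call order only (not part of the original file): a front end
 * would typically run something like
 *
 *     cpu_exec_init_all(0);           // 0 selects the default buffer size
 *     env = cpu_init(cpu_model);      // registers itself via cpu_exec_init()
 *
 * before entering its execution loop. */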
512
513 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
514
515 static int cpu_common_post_load(void *opaque, int version_id)
516 {
517 CPUState *env = opaque;
518
519 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
520 version_id is increased. */
521 env->interrupt_request &= ~0x01;
522 tlb_flush(env, 1);
523
524 return 0;
525 }
526
527 static const VMStateDescription vmstate_cpu_common = {
528 .name = "cpu_common",
529 .version_id = 1,
530 .minimum_version_id = 1,
531 .minimum_version_id_old = 1,
532 .post_load = cpu_common_post_load,
533 .fields = (VMStateField []) {
534 VMSTATE_UINT32(halted, CPUState),
535 VMSTATE_UINT32(interrupt_request, CPUState),
536 VMSTATE_END_OF_LIST()
537 }
538 };
539 #endif
540
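/* Return the CPUState whose cpu_index equals 'cpu', or NULL if there is no
   such CPU. */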
541 CPUState *qemu_get_cpu(int cpu)
542 {
543 CPUState *env = first_cpu;
544
545 while (env) {
546 if (env->cpu_index == cpu)
547 break;
548 env = env->next_cpu;
549 }
550
551 return env;
552 }
553
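/* Register a newly created CPU: assign the next free cpu_index, link it at
   the end of the global CPU list and, when CPU save/load support is built
   in, register its vmstate/savevm handlers. */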
554 void cpu_exec_init(CPUState *env)
555 {
556 CPUState **penv;
557 int cpu_index;
558
559 #if defined(CONFIG_USER_ONLY)
560 cpu_list_lock();
561 #endif
562 env->next_cpu = NULL;
563 penv = &first_cpu;
564 cpu_index = 0;
565 while (*penv != NULL) {
566 penv = &(*penv)->next_cpu;
567 cpu_index++;
568 }
569 env->cpu_index = cpu_index;
570 env->numa_node = 0;
571 QTAILQ_INIT(&env->breakpoints);
572 QTAILQ_INIT(&env->watchpoints);
573 *penv = env;
574 #if defined(CONFIG_USER_ONLY)
575 cpu_list_unlock();
576 #endif
577 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
578 vmstate_register(cpu_index, &vmstate_cpu_common, env);
579 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
580 cpu_save, cpu_load, env);
581 #endif
582 }
583
584 static inline void invalidate_page_bitmap(PageDesc *p)
585 {
586 if (p->code_bitmap) {
587 qemu_free(p->code_bitmap);
588 p->code_bitmap = NULL;
589 }
590 p->code_write_count = 0;
591 }
592
593 /* set to NULL all the 'first_tb' fields in all PageDescs */
594 static void page_flush_tb(void)
595 {
596 int i, j;
597 PageDesc *p;
598
599 for(i = 0; i < L1_SIZE; i++) {
600 p = l1_map[i];
601 if (p) {
602 for(j = 0; j < L2_SIZE; j++) {
603 p->first_tb = NULL;
604 invalidate_page_bitmap(p);
605 p++;
606 }
607 }
608 }
609 }
610
611 /* flush all the translation blocks */
612 /* XXX: tb_flush is currently not thread safe */
613 void tb_flush(CPUState *env1)
614 {
615 CPUState *env;
616 #if defined(DEBUG_FLUSH)
617 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
618 (unsigned long)(code_gen_ptr - code_gen_buffer),
619 nb_tbs, nb_tbs > 0 ?
620 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
621 #endif
622 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
623 cpu_abort(env1, "Internal error: code buffer overflow\n");
624
625 nb_tbs = 0;
626
627 for(env = first_cpu; env != NULL; env = env->next_cpu) {
628 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
629 }
630
631 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
632 page_flush_tb();
633
634 code_gen_ptr = code_gen_buffer;
635 /* XXX: flush processor icache at this point if cache flush is
636 expensive */
637 tb_flush_count++;
638 }
639
640 #ifdef DEBUG_TB_CHECK
641
642 static void tb_invalidate_check(target_ulong address)
643 {
644 TranslationBlock *tb;
645 int i;
646 address &= TARGET_PAGE_MASK;
647 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
648 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
649 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
650 address >= tb->pc + tb->size)) {
651 printf("ERROR invalidate: address=" TARGET_FMT_lx
652 " PC=%08lx size=%04x\n",
653 address, (long)tb->pc, tb->size);
654 }
655 }
656 }
657 }
658
659 /* verify that all the pages have correct rights for code */
660 static void tb_page_check(void)
661 {
662 TranslationBlock *tb;
663 int i, flags1, flags2;
664
665 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
666 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
667 flags1 = page_get_flags(tb->pc);
668 flags2 = page_get_flags(tb->pc + tb->size - 1);
669 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
670 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
671 (long)tb->pc, tb->size, flags1, flags2);
672 }
673 }
674 }
675 }
676
677 #endif
678
679 /* invalidate one TB */
680 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
681 int next_offset)
682 {
683 TranslationBlock *tb1;
684 for(;;) {
685 tb1 = *ptb;
686 if (tb1 == tb) {
687 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
688 break;
689 }
690 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
691 }
692 }
693
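/* Unlink 'tb' from a page's TB list; the low two bits of each list pointer
   encode which of the TB's (up to two) pages the next link belongs to. */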
694 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
695 {
696 TranslationBlock *tb1;
697 unsigned int n1;
698
699 for(;;) {
700 tb1 = *ptb;
701 n1 = (long)tb1 & 3;
702 tb1 = (TranslationBlock *)((long)tb1 & ~3);
703 if (tb1 == tb) {
704 *ptb = tb1->page_next[n1];
705 break;
706 }
707 ptb = &tb1->page_next[n1];
708 }
709 }
710
711 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
712 {
713 TranslationBlock *tb1, **ptb;
714 unsigned int n1;
715
716 ptb = &tb->jmp_next[n];
717 tb1 = *ptb;
718 if (tb1) {
719 /* find tb(n) in circular list */
720 for(;;) {
721 tb1 = *ptb;
722 n1 = (long)tb1 & 3;
723 tb1 = (TranslationBlock *)((long)tb1 & ~3);
724 if (n1 == n && tb1 == tb)
725 break;
726 if (n1 == 2) {
727 ptb = &tb1->jmp_first;
728 } else {
729 ptb = &tb1->jmp_next[n1];
730 }
731 }
732         /* now we can remove tb(n) from the list */
733 *ptb = tb->jmp_next[n];
734
735 tb->jmp_next[n] = NULL;
736 }
737 }
738
739 /* reset the jump entry 'n' of a TB so that it is not chained to
740 another TB */
741 static inline void tb_reset_jump(TranslationBlock *tb, int n)
742 {
743 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
744 }
745
746 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
747 {
748 CPUState *env;
749 PageDesc *p;
750 unsigned int h, n1;
751 target_phys_addr_t phys_pc;
752 TranslationBlock *tb1, *tb2;
753
754 /* remove the TB from the hash list */
755 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
756 h = tb_phys_hash_func(phys_pc);
757 tb_remove(&tb_phys_hash[h], tb,
758 offsetof(TranslationBlock, phys_hash_next));
759
760 /* remove the TB from the page list */
761 if (tb->page_addr[0] != page_addr) {
762 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
763 tb_page_remove(&p->first_tb, tb);
764 invalidate_page_bitmap(p);
765 }
766 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
767 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
768 tb_page_remove(&p->first_tb, tb);
769 invalidate_page_bitmap(p);
770 }
771
772 tb_invalidated_flag = 1;
773
774 /* remove the TB from the hash list */
775 h = tb_jmp_cache_hash_func(tb->pc);
776 for(env = first_cpu; env != NULL; env = env->next_cpu) {
777 if (env->tb_jmp_cache[h] == tb)
778 env->tb_jmp_cache[h] = NULL;
779 }
780
781     /* remove this TB from the two jump lists */
782 tb_jmp_remove(tb, 0);
783 tb_jmp_remove(tb, 1);
784
785 /* suppress any remaining jumps to this TB */
786 tb1 = tb->jmp_first;
787 for(;;) {
788 n1 = (long)tb1 & 3;
789 if (n1 == 2)
790 break;
791 tb1 = (TranslationBlock *)((long)tb1 & ~3);
792 tb2 = tb1->jmp_next[n1];
793 tb_reset_jump(tb1, n1);
794 tb1->jmp_next[n1] = NULL;
795 tb1 = tb2;
796 }
797 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
798
799 tb_phys_invalidate_count++;
800 }
801
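/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */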
802 static inline void set_bits(uint8_t *tab, int start, int len)
803 {
804 int end, mask, end1;
805
806 end = start + len;
807 tab += start >> 3;
808 mask = 0xff << (start & 7);
809 if ((start & ~7) == (end & ~7)) {
810 if (start < end) {
811 mask &= ~(0xff << (end & 7));
812 *tab |= mask;
813 }
814 } else {
815 *tab++ |= mask;
816 start = (start + 8) & ~7;
817 end1 = end & ~7;
818 while (start < end1) {
819 *tab++ = 0xff;
820 start += 8;
821 }
822 if (start < end) {
823 mask = ~(0xff << (end & 7));
824 *tab |= mask;
825 }
826 }
827 }
828
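/* Build the self-modifying-code bitmap of a page: mark every byte range
   covered by a TB that intersects the page. */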
829 static void build_page_bitmap(PageDesc *p)
830 {
831 int n, tb_start, tb_end;
832 TranslationBlock *tb;
833
834 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
835
836 tb = p->first_tb;
837 while (tb != NULL) {
838 n = (long)tb & 3;
839 tb = (TranslationBlock *)((long)tb & ~3);
840 /* NOTE: this is subtle as a TB may span two physical pages */
841 if (n == 0) {
842 /* NOTE: tb_end may be after the end of the page, but
843 it is not a problem */
844 tb_start = tb->pc & ~TARGET_PAGE_MASK;
845 tb_end = tb_start + tb->size;
846 if (tb_end > TARGET_PAGE_SIZE)
847 tb_end = TARGET_PAGE_SIZE;
848 } else {
849 tb_start = 0;
850 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
851 }
852 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
853 tb = tb->page_next[n];
854 }
855 }
856
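/* Translate a new TB for (pc, cs_base, flags, cflags). If the TB array or
   the code buffer is full, flush everything first and retry;
   tb_invalidated_flag is then set so callers know existing TB pointers are
   stale. */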
857 TranslationBlock *tb_gen_code(CPUState *env,
858 target_ulong pc, target_ulong cs_base,
859 int flags, int cflags)
860 {
861 TranslationBlock *tb;
862 uint8_t *tc_ptr;
863 target_ulong phys_pc, phys_page2, virt_page2;
864 int code_gen_size;
865
866 phys_pc = get_phys_addr_code(env, pc);
867 tb = tb_alloc(pc);
868 if (!tb) {
869 /* flush must be done */
870 tb_flush(env);
871 /* cannot fail at this point */
872 tb = tb_alloc(pc);
873 /* Don't forget to invalidate previous TB info. */
874 tb_invalidated_flag = 1;
875 }
876 tc_ptr = code_gen_ptr;
877 tb->tc_ptr = tc_ptr;
878 tb->cs_base = cs_base;
879 tb->flags = flags;
880 tb->cflags = cflags;
881 cpu_gen_code(env, tb, &code_gen_size);
882 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
883
884 /* check next page if needed */
885 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
886 phys_page2 = -1;
887 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
888 phys_page2 = get_phys_addr_code(env, virt_page2);
889 }
890 tb_link_phys(tb, phys_pc, phys_page2);
891 return tb;
892 }
893
894 /* invalidate all TBs which intersect with the target physical page
895    starting in the range [start, end). NOTE: start and end must refer to
896 the same physical page. 'is_cpu_write_access' should be true if called
897 from a real cpu write access: the virtual CPU will exit the current
898 TB if code is modified inside this TB. */
899 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
900 int is_cpu_write_access)
901 {
902 TranslationBlock *tb, *tb_next, *saved_tb;
903 CPUState *env = cpu_single_env;
904 target_ulong tb_start, tb_end;
905 PageDesc *p;
906 int n;
907 #ifdef TARGET_HAS_PRECISE_SMC
908 int current_tb_not_found = is_cpu_write_access;
909 TranslationBlock *current_tb = NULL;
910 int current_tb_modified = 0;
911 target_ulong current_pc = 0;
912 target_ulong current_cs_base = 0;
913 int current_flags = 0;
914 #endif /* TARGET_HAS_PRECISE_SMC */
915
916 p = page_find(start >> TARGET_PAGE_BITS);
917 if (!p)
918 return;
919 if (!p->code_bitmap &&
920 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
921 is_cpu_write_access) {
922 /* build code bitmap */
923 build_page_bitmap(p);
924 }
925
926     /* we remove all the TBs in the range [start, end) */
927 /* XXX: see if in some cases it could be faster to invalidate all the code */
928 tb = p->first_tb;
929 while (tb != NULL) {
930 n = (long)tb & 3;
931 tb = (TranslationBlock *)((long)tb & ~3);
932 tb_next = tb->page_next[n];
933 /* NOTE: this is subtle as a TB may span two physical pages */
934 if (n == 0) {
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
938 tb_end = tb_start + tb->size;
939 } else {
940 tb_start = tb->page_addr[1];
941 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
942 }
943 if (!(tb_end <= start || tb_start >= end)) {
944 #ifdef TARGET_HAS_PRECISE_SMC
945 if (current_tb_not_found) {
946 current_tb_not_found = 0;
947 current_tb = NULL;
948 if (env->mem_io_pc) {
949 /* now we have a real cpu fault */
950 current_tb = tb_find_pc(env->mem_io_pc);
951 }
952 }
953 if (current_tb == tb &&
954 (current_tb->cflags & CF_COUNT_MASK) != 1) {
955 /* If we are modifying the current TB, we must stop
956 its execution. We could be more precise by checking
957 that the modification is after the current PC, but it
958 would require a specialized function to partially
959 restore the CPU state */
960
961 current_tb_modified = 1;
962 cpu_restore_state(current_tb, env,
963 env->mem_io_pc, NULL);
964 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
965 &current_flags);
966 }
967 #endif /* TARGET_HAS_PRECISE_SMC */
968 /* we need to do that to handle the case where a signal
969 occurs while doing tb_phys_invalidate() */
970 saved_tb = NULL;
971 if (env) {
972 saved_tb = env->current_tb;
973 env->current_tb = NULL;
974 }
975 tb_phys_invalidate(tb, -1);
976 if (env) {
977 env->current_tb = saved_tb;
978 if (env->interrupt_request && env->current_tb)
979 cpu_interrupt(env, env->interrupt_request);
980 }
981 }
982 tb = tb_next;
983 }
984 #if !defined(CONFIG_USER_ONLY)
985 /* if no code remaining, no need to continue to use slow writes */
986 if (!p->first_tb) {
987 invalidate_page_bitmap(p);
988 if (is_cpu_write_access) {
989 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
990 }
991 }
992 #endif
993 #ifdef TARGET_HAS_PRECISE_SMC
994 if (current_tb_modified) {
995 /* we generate a block containing just the instruction
996            modifying the memory. This ensures that it cannot modify
997            itself. */
998 env->current_tb = NULL;
999 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1000 cpu_resume_from_signal(env, NULL);
1001 }
1002 #endif
1003 }
1004
1005 /* len must be <= 8 and start must be a multiple of len */
1006 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1007 {
1008 PageDesc *p;
1009 int offset, b;
1010 #if 0
1011 if (1) {
1012 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1013 cpu_single_env->mem_io_vaddr, len,
1014 cpu_single_env->eip,
1015 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1016 }
1017 #endif
1018 p = page_find(start >> TARGET_PAGE_BITS);
1019 if (!p)
1020 return;
1021 if (p->code_bitmap) {
1022 offset = start & ~TARGET_PAGE_MASK;
1023 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1024 if (b & ((1 << len) - 1))
1025 goto do_invalidate;
1026 } else {
1027 do_invalidate:
1028 tb_invalidate_phys_page_range(start, start + len, 1);
1029 }
1030 }
1031
1032 #if !defined(CONFIG_SOFTMMU)
1033 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1034 unsigned long pc, void *puc)
1035 {
1036 TranslationBlock *tb;
1037 PageDesc *p;
1038 int n;
1039 #ifdef TARGET_HAS_PRECISE_SMC
1040 TranslationBlock *current_tb = NULL;
1041 CPUState *env = cpu_single_env;
1042 int current_tb_modified = 0;
1043 target_ulong current_pc = 0;
1044 target_ulong current_cs_base = 0;
1045 int current_flags = 0;
1046 #endif
1047
1048 addr &= TARGET_PAGE_MASK;
1049 p = page_find(addr >> TARGET_PAGE_BITS);
1050 if (!p)
1051 return;
1052 tb = p->first_tb;
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054 if (tb && pc != 0) {
1055 current_tb = tb_find_pc(pc);
1056 }
1057 #endif
1058 while (tb != NULL) {
1059 n = (long)tb & 3;
1060 tb = (TranslationBlock *)((long)tb & ~3);
1061 #ifdef TARGET_HAS_PRECISE_SMC
1062 if (current_tb == tb &&
1063 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1064 /* If we are modifying the current TB, we must stop
1065 its execution. We could be more precise by checking
1066 that the modification is after the current PC, but it
1067 would require a specialized function to partially
1068 restore the CPU state */
1069
1070 current_tb_modified = 1;
1071 cpu_restore_state(current_tb, env, pc, puc);
1072 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1073 &current_flags);
1074 }
1075 #endif /* TARGET_HAS_PRECISE_SMC */
1076 tb_phys_invalidate(tb, addr);
1077 tb = tb->page_next[n];
1078 }
1079 p->first_tb = NULL;
1080 #ifdef TARGET_HAS_PRECISE_SMC
1081 if (current_tb_modified) {
1082 /* we generate a block containing just the instruction
1083            modifying the memory. This ensures that it cannot modify
1084            itself. */
1085 env->current_tb = NULL;
1086 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1087 cpu_resume_from_signal(env, puc);
1088 }
1089 #endif
1090 }
1091 #endif
1092
1093 /* add the tb in the target page and protect it if necessary */
1094 static inline void tb_alloc_page(TranslationBlock *tb,
1095 unsigned int n, target_ulong page_addr)
1096 {
1097 PageDesc *p;
1098 TranslationBlock *last_first_tb;
1099
1100 tb->page_addr[n] = page_addr;
1101 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1102 tb->page_next[n] = p->first_tb;
1103 last_first_tb = p->first_tb;
1104 p->first_tb = (TranslationBlock *)((long)tb | n);
1105 invalidate_page_bitmap(p);
1106
1107 #if defined(TARGET_HAS_SMC) || 1
1108
1109 #if defined(CONFIG_USER_ONLY)
1110 if (p->flags & PAGE_WRITE) {
1111 target_ulong addr;
1112 PageDesc *p2;
1113 int prot;
1114
1115         /* force the host page to be non-writable (writes will then take a
1116            page fault + mprotect overhead) */
1117 page_addr &= qemu_host_page_mask;
1118 prot = 0;
1119 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1120 addr += TARGET_PAGE_SIZE) {
1121
1122 p2 = page_find (addr >> TARGET_PAGE_BITS);
1123 if (!p2)
1124 continue;
1125 prot |= p2->flags;
1126 p2->flags &= ~PAGE_WRITE;
1127 page_get_flags(addr);
1128 }
1129 mprotect(g2h(page_addr), qemu_host_page_size,
1130 (prot & PAGE_BITS) & ~PAGE_WRITE);
1131 #ifdef DEBUG_TB_INVALIDATE
1132 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1133 page_addr);
1134 #endif
1135 }
1136 #else
1137 /* if some code is already present, then the pages are already
1138 protected. So we handle the case where only the first TB is
1139 allocated in a physical page */
1140 if (!last_first_tb) {
1141 tlb_protect_code(page_addr);
1142 }
1143 #endif
1144
1145 #endif /* TARGET_HAS_SMC */
1146 }
1147
1148 /* Allocate a new translation block. Flush the translation buffer if
1149 too many translation blocks or too much generated code. */
1150 TranslationBlock *tb_alloc(target_ulong pc)
1151 {
1152 TranslationBlock *tb;
1153
1154 if (nb_tbs >= code_gen_max_blocks ||
1155 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1156 return NULL;
1157 tb = &tbs[nb_tbs++];
1158 tb->pc = pc;
1159 tb->cflags = 0;
1160 return tb;
1161 }
1162
1163 void tb_free(TranslationBlock *tb)
1164 {
1165     /* In practice this is mostly used for single-use temporary TBs.
1166 Ignore the hard cases and just back up if this TB happens to
1167 be the last one generated. */
1168 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1169 code_gen_ptr = tb->tc_ptr;
1170 nb_tbs--;
1171 }
1172 }
1173
1174 /* add a new TB and link it to the physical page tables. phys_page2 is
1175 (-1) to indicate that only one page contains the TB. */
1176 void tb_link_phys(TranslationBlock *tb,
1177 target_ulong phys_pc, target_ulong phys_page2)
1178 {
1179 unsigned int h;
1180 TranslationBlock **ptb;
1181
1182 /* Grab the mmap lock to stop another thread invalidating this TB
1183 before we are done. */
1184 mmap_lock();
1185 /* add in the physical hash table */
1186 h = tb_phys_hash_func(phys_pc);
1187 ptb = &tb_phys_hash[h];
1188 tb->phys_hash_next = *ptb;
1189 *ptb = tb;
1190
1191 /* add in the page list */
1192 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1193 if (phys_page2 != -1)
1194 tb_alloc_page(tb, 1, phys_page2);
1195 else
1196 tb->page_addr[1] = -1;
1197
1198 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1199 tb->jmp_next[0] = NULL;
1200 tb->jmp_next[1] = NULL;
1201
1202 /* init original jump addresses */
1203 if (tb->tb_next_offset[0] != 0xffff)
1204 tb_reset_jump(tb, 0);
1205 if (tb->tb_next_offset[1] != 0xffff)
1206 tb_reset_jump(tb, 1);
1207
1208 #ifdef DEBUG_TB_CHECK
1209 tb_page_check();
1210 #endif
1211 mmap_unlock();
1212 }
1213
1214 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1215 tb[1].tc_ptr. Return NULL if not found */
1216 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1217 {
1218 int m_min, m_max, m;
1219 unsigned long v;
1220 TranslationBlock *tb;
1221
1222 if (nb_tbs <= 0)
1223 return NULL;
1224 if (tc_ptr < (unsigned long)code_gen_buffer ||
1225 tc_ptr >= (unsigned long)code_gen_ptr)
1226 return NULL;
1227 /* binary search (cf Knuth) */
1228 m_min = 0;
1229 m_max = nb_tbs - 1;
1230 while (m_min <= m_max) {
1231 m = (m_min + m_max) >> 1;
1232 tb = &tbs[m];
1233 v = (unsigned long)tb->tc_ptr;
1234 if (v == tc_ptr)
1235 return tb;
1236 else if (tc_ptr < v) {
1237 m_max = m - 1;
1238 } else {
1239 m_min = m + 1;
1240 }
1241 }
1242 return &tbs[m_max];
1243 }
1244
1245 static void tb_reset_jump_recursive(TranslationBlock *tb);
1246
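/* Unchain outgoing jump 'n' of 'tb': find the TB it jumps to, unlink 'tb'
   from that TB's circular jmp_first list, patch the generated code back to
   its default exit, then recurse into the destination TB. */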
1247 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1248 {
1249 TranslationBlock *tb1, *tb_next, **ptb;
1250 unsigned int n1;
1251
1252 tb1 = tb->jmp_next[n];
1253 if (tb1 != NULL) {
1254 /* find head of list */
1255 for(;;) {
1256 n1 = (long)tb1 & 3;
1257 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1258 if (n1 == 2)
1259 break;
1260 tb1 = tb1->jmp_next[n1];
1261 }
1262         /* we are now sure that tb jumps to tb1 */
1263 tb_next = tb1;
1264
1265 /* remove tb from the jmp_first list */
1266 ptb = &tb_next->jmp_first;
1267 for(;;) {
1268 tb1 = *ptb;
1269 n1 = (long)tb1 & 3;
1270 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1271 if (n1 == n && tb1 == tb)
1272 break;
1273 ptb = &tb1->jmp_next[n1];
1274 }
1275 *ptb = tb->jmp_next[n];
1276 tb->jmp_next[n] = NULL;
1277
1278 /* suppress the jump to next tb in generated code */
1279 tb_reset_jump(tb, n);
1280
1281         /* also reset the jumps in the tb we could have jumped to */
1282 tb_reset_jump_recursive(tb_next);
1283 }
1284 }
1285
1286 static void tb_reset_jump_recursive(TranslationBlock *tb)
1287 {
1288 tb_reset_jump_recursive2(tb, 0);
1289 tb_reset_jump_recursive2(tb, 1);
1290 }
1291
1292 #if defined(TARGET_HAS_ICE)
1293 #if defined(CONFIG_USER_ONLY)
1294 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1295 {
1296 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1297 }
1298 #else
1299 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1300 {
1301 target_phys_addr_t addr;
1302 target_ulong pd;
1303 ram_addr_t ram_addr;
1304 PhysPageDesc *p;
1305
1306 addr = cpu_get_phys_page_debug(env, pc);
1307 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1308 if (!p) {
1309 pd = IO_MEM_UNASSIGNED;
1310 } else {
1311 pd = p->phys_offset;
1312 }
1313 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1314 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1315 }
1316 #endif
1317 #endif /* TARGET_HAS_ICE */
1318
1319 #if defined(CONFIG_USER_ONLY)
1320 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1321
1322 {
1323 }
1324
1325 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1326 int flags, CPUWatchpoint **watchpoint)
1327 {
1328 return -ENOSYS;
1329 }
1330 #else
1331 /* Add a watchpoint. */
1332 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1333 int flags, CPUWatchpoint **watchpoint)
1334 {
1335 target_ulong len_mask = ~(len - 1);
1336 CPUWatchpoint *wp;
1337
1338 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1339 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1340 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1341 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1342 return -EINVAL;
1343 }
1344 wp = qemu_malloc(sizeof(*wp));
1345
1346 wp->vaddr = addr;
1347 wp->len_mask = len_mask;
1348 wp->flags = flags;
1349
1350 /* keep all GDB-injected watchpoints in front */
1351 if (flags & BP_GDB)
1352 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1353 else
1354 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1355
1356 tlb_flush_page(env, addr);
1357
1358 if (watchpoint)
1359 *watchpoint = wp;
1360 return 0;
1361 }
1362
1363 /* Remove a specific watchpoint. */
1364 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1365 int flags)
1366 {
1367 target_ulong len_mask = ~(len - 1);
1368 CPUWatchpoint *wp;
1369
1370 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1371 if (addr == wp->vaddr && len_mask == wp->len_mask
1372 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1373 cpu_watchpoint_remove_by_ref(env, wp);
1374 return 0;
1375 }
1376 }
1377 return -ENOENT;
1378 }
1379
1380 /* Remove a specific watchpoint by reference. */
1381 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1382 {
1383 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1384
1385 tlb_flush_page(env, watchpoint->vaddr);
1386
1387 qemu_free(watchpoint);
1388 }
1389
1390 /* Remove all matching watchpoints. */
1391 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1392 {
1393 CPUWatchpoint *wp, *next;
1394
1395 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1396 if (wp->flags & mask)
1397 cpu_watchpoint_remove_by_ref(env, wp);
1398 }
1399 }
1400 #endif
1401
1402 /* Add a breakpoint. */
1403 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1404 CPUBreakpoint **breakpoint)
1405 {
1406 #if defined(TARGET_HAS_ICE)
1407 CPUBreakpoint *bp;
1408
1409 bp = qemu_malloc(sizeof(*bp));
1410
1411 bp->pc = pc;
1412 bp->flags = flags;
1413
1414 /* keep all GDB-injected breakpoints in front */
1415 if (flags & BP_GDB)
1416 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1417 else
1418 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1419
1420 breakpoint_invalidate(env, pc);
1421
1422 if (breakpoint)
1423 *breakpoint = bp;
1424 return 0;
1425 #else
1426 return -ENOSYS;
1427 #endif
1428 }
1429
1430 /* Remove a specific breakpoint. */
1431 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1432 {
1433 #if defined(TARGET_HAS_ICE)
1434 CPUBreakpoint *bp;
1435
1436 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1437 if (bp->pc == pc && bp->flags == flags) {
1438 cpu_breakpoint_remove_by_ref(env, bp);
1439 return 0;
1440 }
1441 }
1442 return -ENOENT;
1443 #else
1444 return -ENOSYS;
1445 #endif
1446 }
1447
1448 /* Remove a specific breakpoint by reference. */
1449 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1450 {
1451 #if defined(TARGET_HAS_ICE)
1452 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1453
1454 breakpoint_invalidate(env, breakpoint->pc);
1455
1456 qemu_free(breakpoint);
1457 #endif
1458 }
1459
1460 /* Remove all matching breakpoints. */
1461 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1462 {
1463 #if defined(TARGET_HAS_ICE)
1464 CPUBreakpoint *bp, *next;
1465
1466 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1467 if (bp->flags & mask)
1468 cpu_breakpoint_remove_by_ref(env, bp);
1469 }
1470 #endif
1471 }
1472
1473 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1474 CPU loop after each instruction */
1475 void cpu_single_step(CPUState *env, int enabled)
1476 {
1477 #if defined(TARGET_HAS_ICE)
1478 if (env->singlestep_enabled != enabled) {
1479 env->singlestep_enabled = enabled;
1480 if (kvm_enabled())
1481 kvm_update_guest_debug(env, 0);
1482 else {
1483 /* must flush all the translated code to avoid inconsistencies */
1484 /* XXX: only flush what is necessary */
1485 tb_flush(env);
1486 }
1487 }
1488 #endif
1489 }
1490
1491 /* enable or disable low level logging */
1492 void cpu_set_log(int log_flags)
1493 {
1494 loglevel = log_flags;
1495 if (loglevel && !logfile) {
1496 logfile = fopen(logfilename, log_append ? "a" : "w");
1497 if (!logfile) {
1498 perror(logfilename);
1499 _exit(1);
1500 }
1501 #if !defined(CONFIG_SOFTMMU)
1502 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1503 {
1504 static char logfile_buf[4096];
1505 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1506 }
1507 #elif !defined(_WIN32)
1508 /* Win32 doesn't support line-buffering and requires size >= 2 */
1509 setvbuf(logfile, NULL, _IOLBF, 0);
1510 #endif
1511 log_append = 1;
1512 }
1513 if (!loglevel && logfile) {
1514 fclose(logfile);
1515 logfile = NULL;
1516 }
1517 }
1518
1519 void cpu_set_log_filename(const char *filename)
1520 {
1521 logfilename = strdup(filename);
1522 if (logfile) {
1523 fclose(logfile);
1524 logfile = NULL;
1525 }
1526 cpu_set_log(loglevel);
1527 }
1528
1529 static void cpu_unlink_tb(CPUState *env)
1530 {
1531 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1532 problem and hope the cpu will stop of its own accord. For userspace
1533 emulation this often isn't actually as bad as it sounds. Often
1534 signals are used primarily to interrupt blocking syscalls. */
1535 TranslationBlock *tb;
1536 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1537
1538 spin_lock(&interrupt_lock);
1539 tb = env->current_tb;
1540 /* if the cpu is currently executing code, we must unlink it and
1541 all the potentially executing TB */
1542 if (tb) {
1543 env->current_tb = NULL;
1544 tb_reset_jump_recursive(tb);
1545 }
1546 spin_unlock(&interrupt_lock);
1547 }
1548
1549 /* mask must never be zero, except for A20 change call */
1550 void cpu_interrupt(CPUState *env, int mask)
1551 {
1552 int old_mask;
1553
1554 old_mask = env->interrupt_request;
1555 env->interrupt_request |= mask;
1556
1557 #ifndef CONFIG_USER_ONLY
1558 /*
1559 * If called from iothread context, wake the target cpu in
1560      * case it's halted.
1561 */
1562 if (!qemu_cpu_self(env)) {
1563 qemu_cpu_kick(env);
1564 return;
1565 }
1566 #endif
1567
1568 if (use_icount) {
1569 env->icount_decr.u16.high = 0xffff;
1570 #ifndef CONFIG_USER_ONLY
1571 if (!can_do_io(env)
1572 && (mask & ~old_mask) != 0) {
1573 cpu_abort(env, "Raised interrupt while not in I/O function");
1574 }
1575 #endif
1576 } else {
1577 cpu_unlink_tb(env);
1578 }
1579 }
1580
1581 void cpu_reset_interrupt(CPUState *env, int mask)
1582 {
1583 env->interrupt_request &= ~mask;
1584 }
1585
1586 void cpu_exit(CPUState *env)
1587 {
1588 env->exit_request = 1;
1589 cpu_unlink_tb(env);
1590 }
1591
1592 const CPULogItem cpu_log_items[] = {
1593 { CPU_LOG_TB_OUT_ASM, "out_asm",
1594 "show generated host assembly code for each compiled TB" },
1595 { CPU_LOG_TB_IN_ASM, "in_asm",
1596 "show target assembly code for each compiled TB" },
1597 { CPU_LOG_TB_OP, "op",
1598 "show micro ops for each compiled TB" },
1599 { CPU_LOG_TB_OP_OPT, "op_opt",
1600 "show micro ops "
1601 #ifdef TARGET_I386
1602 "before eflags optimization and "
1603 #endif
1604 "after liveness analysis" },
1605 { CPU_LOG_INT, "int",
1606 "show interrupts/exceptions in short format" },
1607 { CPU_LOG_EXEC, "exec",
1608 "show trace before each executed TB (lots of logs)" },
1609 { CPU_LOG_TB_CPU, "cpu",
1610 "show CPU state before block translation" },
1611 #ifdef TARGET_I386
1612 { CPU_LOG_PCALL, "pcall",
1613 "show protected mode far calls/returns/exceptions" },
1614 { CPU_LOG_RESET, "cpu_reset",
1615 "show CPU state before CPU resets" },
1616 #endif
1617 #ifdef DEBUG_IOPORT
1618 { CPU_LOG_IOPORT, "ioport",
1619 "show all i/o ports accesses" },
1620 #endif
1621 { 0, NULL, NULL },
1622 };
1623
1624 #ifndef CONFIG_USER_ONLY
1625 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1626 = QLIST_HEAD_INITIALIZER(memory_client_list);
1627
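/* Helpers that fan out physical-memory events (mapping changes, dirty bitmap
   syncs, migration-log toggles) to every registered CPUPhysMemoryClient. */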
1628 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1629 ram_addr_t size,
1630 ram_addr_t phys_offset)
1631 {
1632 CPUPhysMemoryClient *client;
1633 QLIST_FOREACH(client, &memory_client_list, list) {
1634 client->set_memory(client, start_addr, size, phys_offset);
1635 }
1636 }
1637
1638 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1639 target_phys_addr_t end)
1640 {
1641 CPUPhysMemoryClient *client;
1642 QLIST_FOREACH(client, &memory_client_list, list) {
1643 int r = client->sync_dirty_bitmap(client, start, end);
1644 if (r < 0)
1645 return r;
1646 }
1647 return 0;
1648 }
1649
1650 static int cpu_notify_migration_log(int enable)
1651 {
1652 CPUPhysMemoryClient *client;
1653 QLIST_FOREACH(client, &memory_client_list, list) {
1654 int r = client->migration_log(client, enable);
1655 if (r < 0)
1656 return r;
1657 }
1658 return 0;
1659 }
1660
1661 static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1662 CPUPhysMemoryClient *client)
1663 {
1664 PhysPageDesc *pd;
1665 int l1, l2;
1666
1667 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1668 pd = phys_map[l1];
1669 if (!pd) {
1670 continue;
1671 }
1672 for (l2 = 0; l2 < L2_SIZE; ++l2) {
1673 if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1674 continue;
1675 }
1676 client->set_memory(client, pd[l2].region_offset,
1677 TARGET_PAGE_SIZE, pd[l2].phys_offset);
1678 }
1679 }
1680 }
1681
1682 static void phys_page_for_each(CPUPhysMemoryClient *client)
1683 {
1684 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
1685
1686 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1687 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1688 #endif
1689 void **phys_map = (void **)l1_phys_map;
1690 int l1;
1691 if (!l1_phys_map) {
1692 return;
1693 }
1694 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1695 if (phys_map[l1]) {
1696 phys_page_for_each_in_l1_map(phys_map[l1], client);
1697 }
1698 }
1699 #else
1700 if (!l1_phys_map) {
1701 return;
1702 }
1703 phys_page_for_each_in_l1_map(l1_phys_map, client);
1704 #endif
1705 }
1706
1707 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1708 {
1709 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1710 phys_page_for_each(client);
1711 }
1712
1713 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1714 {
1715 QLIST_REMOVE(client, list);
1716 }
1717 #endif
1718
1719 static int cmp1(const char *s1, int n, const char *s2)
1720 {
1721 if (strlen(s2) != n)
1722 return 0;
1723 return memcmp(s1, s2, n) == 0;
1724 }
1725
1726 /* takes a comma-separated list of log masks. Returns 0 on error. */
1727 int cpu_str_to_log_mask(const char *str)
1728 {
1729 const CPULogItem *item;
1730 int mask;
1731 const char *p, *p1;
1732
1733 p = str;
1734 mask = 0;
1735 for(;;) {
1736 p1 = strchr(p, ',');
1737 if (!p1)
1738 p1 = p + strlen(p);
1739 if(cmp1(p,p1-p,"all")) {
1740 for(item = cpu_log_items; item->mask != 0; item++) {
1741 mask |= item->mask;
1742 }
1743 } else {
1744 for(item = cpu_log_items; item->mask != 0; item++) {
1745 if (cmp1(p, p1 - p, item->name))
1746 goto found;
1747 }
1748 return 0;
1749 }
1750 found:
1751 mask |= item->mask;
1752 if (*p1 != ',')
1753 break;
1754 p = p1 + 1;
1755 }
1756 return mask;
1757 }
1758
1759 void cpu_abort(CPUState *env, const char *fmt, ...)
1760 {
1761 va_list ap;
1762 va_list ap2;
1763
1764 va_start(ap, fmt);
1765 va_copy(ap2, ap);
1766 fprintf(stderr, "qemu: fatal: ");
1767 vfprintf(stderr, fmt, ap);
1768 fprintf(stderr, "\n");
1769 #ifdef TARGET_I386
1770 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1771 #else
1772 cpu_dump_state(env, stderr, fprintf, 0);
1773 #endif
1774 if (qemu_log_enabled()) {
1775 qemu_log("qemu: fatal: ");
1776 qemu_log_vprintf(fmt, ap2);
1777 qemu_log("\n");
1778 #ifdef TARGET_I386
1779 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1780 #else
1781 log_cpu_state(env, 0);
1782 #endif
1783 qemu_log_flush();
1784 qemu_log_close();
1785 }
1786 va_end(ap2);
1787 va_end(ap);
1788 #if defined(CONFIG_USER_ONLY)
1789 {
1790 struct sigaction act;
1791 sigfillset(&act.sa_mask);
1792 act.sa_handler = SIG_DFL;
1793 sigaction(SIGABRT, &act, NULL);
1794 }
1795 #endif
1796 abort();
1797 }
1798
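/* Duplicate a CPU state, preserving only the list chaining and cpu_index of
   the new copy and cloning its break/watchpoints (used e.g. when user-mode
   emulation clones a process). */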
1799 CPUState *cpu_copy(CPUState *env)
1800 {
1801 CPUState *new_env = cpu_init(env->cpu_model_str);
1802 CPUState *next_cpu = new_env->next_cpu;
1803 int cpu_index = new_env->cpu_index;
1804 #if defined(TARGET_HAS_ICE)
1805 CPUBreakpoint *bp;
1806 CPUWatchpoint *wp;
1807 #endif
1808
1809 memcpy(new_env, env, sizeof(CPUState));
1810
1811 /* Preserve chaining and index. */
1812 new_env->next_cpu = next_cpu;
1813 new_env->cpu_index = cpu_index;
1814
1815 /* Clone all break/watchpoints.
1816 Note: Once we support ptrace with hw-debug register access, make sure
1817 BP_CPU break/watchpoints are handled correctly on clone. */
1818     QTAILQ_INIT(&new_env->breakpoints);
1819     QTAILQ_INIT(&new_env->watchpoints);
1820 #if defined(TARGET_HAS_ICE)
1821 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1822 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1823 }
1824 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1825 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1826 wp->flags, NULL);
1827 }
1828 #endif
1829
1830 return new_env;
1831 }
1832
1833 #if !defined(CONFIG_USER_ONLY)
1834
1835 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1836 {
1837 unsigned int i;
1838
1839 /* Discard jump cache entries for any tb which might potentially
1840 overlap the flushed page. */
1841 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1842 memset (&env->tb_jmp_cache[i], 0,
1843 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1844
1845 i = tb_jmp_cache_hash_page(addr);
1846 memset (&env->tb_jmp_cache[i], 0,
1847 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1848 }
1849
1850 static CPUTLBEntry s_cputlb_empty_entry = {
1851 .addr_read = -1,
1852 .addr_write = -1,
1853 .addr_code = -1,
1854 .addend = -1,
1855 };
1856
1857 /* NOTE: if flush_global is true, also flush global entries (not
1858 implemented yet) */
1859 void tlb_flush(CPUState *env, int flush_global)
1860 {
1861 int i;
1862
1863 #if defined(DEBUG_TLB)
1864 printf("tlb_flush:\n");
1865 #endif
1866 /* must reset current TB so that interrupts cannot modify the
1867 links while we are modifying them */
1868 env->current_tb = NULL;
1869
1870 for(i = 0; i < CPU_TLB_SIZE; i++) {
1871 int mmu_idx;
1872 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1873 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1874 }
1875 }
1876
1877 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1878
1879 tlb_flush_count++;
1880 }
1881
1882 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1883 {
1884 if (addr == (tlb_entry->addr_read &
1885 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1886 addr == (tlb_entry->addr_write &
1887 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1888 addr == (tlb_entry->addr_code &
1889 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1890 *tlb_entry = s_cputlb_empty_entry;
1891 }
1892 }
1893
1894 void tlb_flush_page(CPUState *env, target_ulong addr)
1895 {
1896 int i;
1897 int mmu_idx;
1898
1899 #if defined(DEBUG_TLB)
1900 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1901 #endif
1902 /* must reset current TB so that interrupts cannot modify the
1903 links while we are modifying them */
1904 env->current_tb = NULL;
1905
1906 addr &= TARGET_PAGE_MASK;
1907 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1908 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1909 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1910
1911 tlb_flush_jmp_cache(env, addr);
1912 }
1913
1914 /* update the TLBs so that writes to code in the virtual page 'addr'
1915 can be detected */
1916 static void tlb_protect_code(ram_addr_t ram_addr)
1917 {
1918 cpu_physical_memory_reset_dirty(ram_addr,
1919 ram_addr + TARGET_PAGE_SIZE,
1920 CODE_DIRTY_FLAG);
1921 }
1922
1923 /* update the TLB so that writes in physical page 'ram_addr' are no longer
1924    tested for self-modifying code */
1925 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1926 target_ulong vaddr)
1927 {
1928 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1929 }
1930
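/* If this TLB entry is a RAM write entry whose target falls inside
   [start, start + length), set TLB_NOTDIRTY so writes go back through the
   slow path. */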
1931 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1932 unsigned long start, unsigned long length)
1933 {
1934 unsigned long addr;
1935 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1936 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1937 if ((addr - start) < length) {
1938 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1939 }
1940 }
1941 }
1942
1943 /* Note: start and end must be within the same ram block. */
1944 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1945 int dirty_flags)
1946 {
1947 CPUState *env;
1948 unsigned long length, start1;
1949 int i, mask, len;
1950 uint8_t *p;
1951
1952 start &= TARGET_PAGE_MASK;
1953 end = TARGET_PAGE_ALIGN(end);
1954
1955 length = end - start;
1956 if (length == 0)
1957 return;
1958 len = length >> TARGET_PAGE_BITS;
1959 mask = ~dirty_flags;
1960 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1961 for(i = 0; i < len; i++)
1962 p[i] &= mask;
1963
1964 /* we modify the TLB cache so that the dirty bit will be set again
1965 when accessing the range */
1966 start1 = (unsigned long)qemu_get_ram_ptr(start);
1967     /* Check that we don't span multiple blocks - this breaks the
1968 address comparisons below. */
1969 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1970 != (end - 1) - start) {
1971 abort();
1972 }
1973
1974 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1975 int mmu_idx;
1976 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1977 for(i = 0; i < CPU_TLB_SIZE; i++)
1978 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1979 start1, length);
1980 }
1981 }
1982 }
1983
1984 int cpu_physical_memory_set_dirty_tracking(int enable)
1985 {
1986 int ret = 0;
1987 in_migration = enable;
1988 ret = cpu_notify_migration_log(!!enable);
1989 return ret;
1990 }
1991
1992 int cpu_physical_memory_get_dirty_tracking(void)
1993 {
1994 return in_migration;
1995 }
1996
1997 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1998 target_phys_addr_t end_addr)
1999 {
2000 int ret;
2001
2002 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2003 return ret;
2004 }
2005
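/* Illustrative sketch (editor's addition, not part of the original file):
 * one round of the dirty-logging cycle as a live-migration loop might drive
 * it.  It assumes the cpu_physical_memory_get_dirty() helper and
 * MIGRATION_DIRTY_FLAG from cpu-all.h; a real caller also throttles and
 * actually transfers the pages.
 */
static void example_dirty_log_round(ram_addr_t start, ram_addr_t size)
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);
    /* pull the accelerator's dirty log (e.g. KVM's) into phys_ram_dirty */
    cpu_physical_sync_dirty_bitmap(start, start + size);

    for (addr = start; addr < start + size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            /* ... send the page to the destination here ... */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}
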
2006 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2007 {
2008 ram_addr_t ram_addr;
2009 void *p;
2010
2011 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2012 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2013 + tlb_entry->addend);
2014 ram_addr = qemu_ram_addr_from_host(p);
2015 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2016 tlb_entry->addr_write |= TLB_NOTDIRTY;
2017 }
2018 }
2019 }
2020
2021 /* update the TLB according to the current state of the dirty bits */
2022 void cpu_tlb_update_dirty(CPUState *env)
2023 {
2024 int i;
2025 int mmu_idx;
2026 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2027 for(i = 0; i < CPU_TLB_SIZE; i++)
2028 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2029 }
2030 }
2031
2032 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2033 {
2034 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2035 tlb_entry->addr_write = vaddr;
2036 }
2037
2038 /* update the TLB corresponding to virtual page vaddr
2039 so that it is no longer dirty */
2040 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2041 {
2042 int i;
2043 int mmu_idx;
2044
2045 vaddr &= TARGET_PAGE_MASK;
2046 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2047 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2048 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2049 }
2050
2051 /* add a new TLB entry. At most one entry for a given virtual address
2052 is permitted. Return 0 if OK or 2 if the page could not be mapped
2053    (can only happen in non-SOFTMMU mode for I/O pages or pages
2054 conflicting with the host address space). */
2055 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2056 target_phys_addr_t paddr, int prot,
2057 int mmu_idx, int is_softmmu)
2058 {
2059 PhysPageDesc *p;
2060 unsigned long pd;
2061 unsigned int index;
2062 target_ulong address;
2063 target_ulong code_address;
2064 target_phys_addr_t addend;
2065 int ret;
2066 CPUTLBEntry *te;
2067 CPUWatchpoint *wp;
2068 target_phys_addr_t iotlb;
2069
2070 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2071 if (!p) {
2072 pd = IO_MEM_UNASSIGNED;
2073 } else {
2074 pd = p->phys_offset;
2075 }
2076 #if defined(DEBUG_TLB)
2077 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2078 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2079 #endif
2080
2081 ret = 0;
2082 address = vaddr;
2083 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2084 /* IO memory case (romd handled later) */
2085 address |= TLB_MMIO;
2086 }
2087 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2088 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2089 /* Normal RAM. */
2090 iotlb = pd & TARGET_PAGE_MASK;
2091 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2092 iotlb |= IO_MEM_NOTDIRTY;
2093 else
2094 iotlb |= IO_MEM_ROM;
2095 } else {
2096 /* IO handlers are currently passed a physical address.
2097 It would be nice to pass an offset from the base address
2098 of that region. This would avoid having to special case RAM,
2099 and avoid full address decoding in every device.
2100 We can't use the high bits of pd for this because
2101 IO_MEM_ROMD uses these as a ram address. */
2102 iotlb = (pd & ~TARGET_PAGE_MASK);
2103 if (p) {
2104 iotlb += p->region_offset;
2105 } else {
2106 iotlb += paddr;
2107 }
2108 }
2109
2110 code_address = address;
2111 /* Make accesses to pages with watchpoints go via the
2112 watchpoint trap routines. */
2113 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2114 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2115 iotlb = io_mem_watch + paddr;
2116 /* TODO: The memory case can be optimized by not trapping
2117 reads of pages with a write breakpoint. */
2118 address |= TLB_MMIO;
2119 }
2120 }
2121
2122 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2123 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2124 te = &env->tlb_table[mmu_idx][index];
2125 te->addend = addend - vaddr;
2126 if (prot & PAGE_READ) {
2127 te->addr_read = address;
2128 } else {
2129 te->addr_read = -1;
2130 }
2131
2132 if (prot & PAGE_EXEC) {
2133 te->addr_code = code_address;
2134 } else {
2135 te->addr_code = -1;
2136 }
2137 if (prot & PAGE_WRITE) {
2138 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2139 (pd & IO_MEM_ROMD)) {
2140 /* Write access calls the I/O callback. */
2141 te->addr_write = address | TLB_MMIO;
2142 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2143 !cpu_physical_memory_is_dirty(pd)) {
2144 te->addr_write = address | TLB_NOTDIRTY;
2145 } else {
2146 te->addr_write = address;
2147 }
2148 } else {
2149 te->addr_write = -1;
2150 }
2151 return ret;
2152 }
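
/* Illustrative sketch (editor's addition): the shape of the call a target's
 * MMU fault handler makes into tlb_set_page_exec() above.  The identity
 * translation and the helper name are invented; a real target derives paddr
 * and prot from its page tables (targets usually reach this via the
 * tlb_set_page() wrapper).
 */
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx)
{
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;  /* fake 1:1 map */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* one entry per virtual page; any previous entry for vaddr is replaced */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr,
                             prot, mmu_idx, 1);
}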
2153
2154 #else
2155
2156 void tlb_flush(CPUState *env, int flush_global)
2157 {
2158 }
2159
2160 void tlb_flush_page(CPUState *env, target_ulong addr)
2161 {
2162 }
2163
2164 /*
2165 * Walks guest process memory "regions" one by one
2166 * and calls callback function 'fn' for each region.
2167 */
2168 int walk_memory_regions(void *priv,
2169 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2170 {
2171 unsigned long start, end;
2172 PageDesc *p = NULL;
2173 int i, j, prot, prot1;
2174 int rc = 0;
2175
2176 start = end = -1;
2177 prot = 0;
2178
2179 for (i = 0; i <= L1_SIZE; i++) {
2180 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2181 for (j = 0; j < L2_SIZE; j++) {
2182 prot1 = (p == NULL) ? 0 : p[j].flags;
2183 /*
2184              * A "region" is one contiguous chunk of memory
2185              * that has the same protection flags set.
2186 */
2187 if (prot1 != prot) {
2188 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2189 if (start != -1) {
2190 rc = (*fn)(priv, start, end, prot);
2191 /* callback can stop iteration by returning != 0 */
2192 if (rc != 0)
2193 return (rc);
2194 }
2195 if (prot1 != 0)
2196 start = end;
2197 else
2198 start = -1;
2199 prot = prot1;
2200 }
2201 if (p == NULL)
2202 break;
2203 }
2204 }
2205 return (rc);
2206 }
2207
2208 static int dump_region(void *priv, unsigned long start,
2209 unsigned long end, unsigned long prot)
2210 {
2211 FILE *f = (FILE *)priv;
2212
2213 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2214 start, end, end - start,
2215 ((prot & PAGE_READ) ? 'r' : '-'),
2216 ((prot & PAGE_WRITE) ? 'w' : '-'),
2217 ((prot & PAGE_EXEC) ? 'x' : '-'));
2218
2219 return (0);
2220 }
2221
2222 /* dump memory mappings */
2223 void page_dump(FILE *f)
2224 {
2225 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2226 "start", "end", "size", "prot");
2227 walk_memory_regions(f, dump_region);
2228 }
2229
2230 int page_get_flags(target_ulong address)
2231 {
2232 PageDesc *p;
2233
2234 p = page_find(address >> TARGET_PAGE_BITS);
2235 if (!p)
2236 return 0;
2237 return p->flags;
2238 }
2239
2240 /* modify the flags of a page and invalidate the code if
2241 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2242 depending on PAGE_WRITE */
2243 void page_set_flags(target_ulong start, target_ulong end, int flags)
2244 {
2245 PageDesc *p;
2246 target_ulong addr;
2247
2248 /* mmap_lock should already be held. */
2249 start = start & TARGET_PAGE_MASK;
2250 end = TARGET_PAGE_ALIGN(end);
2251 if (flags & PAGE_WRITE)
2252 flags |= PAGE_WRITE_ORG;
2253 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2254 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2255 /* We may be called for host regions that are outside guest
2256 address space. */
2257 if (!p)
2258 return;
2259 /* if the write protection is set, then we invalidate the code
2260 inside */
2261 if (!(p->flags & PAGE_WRITE) &&
2262 (flags & PAGE_WRITE) &&
2263 p->first_tb) {
2264 tb_invalidate_phys_page(addr, 0, NULL);
2265 }
2266 p->flags = flags;
2267 }
2268 }
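
/* Illustrative sketch (editor's addition): how the user-mode mmap emulation
 * typically publishes a fresh guest mapping through page_set_flags() above
 * (cf. target_mmap() in linux-user/mmap.c).  The helper name and argument
 * split are invented for the example.
 */
static void example_publish_mapping(target_ulong start, target_ulong len,
                                    int prot)
{
    mmap_lock();
    /* PAGE_VALID marks the range as mapped; prot is PAGE_READ/WRITE/EXEC */
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
}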
2269
2270 int page_check_range(target_ulong start, target_ulong len, int flags)
2271 {
2272 PageDesc *p;
2273 target_ulong end;
2274 target_ulong addr;
2275
2276 if (start + len < start)
2277 /* we've wrapped around */
2278 return -1;
2279
2280     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2281 start = start & TARGET_PAGE_MASK;
2282
2283 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2284 p = page_find(addr >> TARGET_PAGE_BITS);
2285         if (!p)
2286             return -1;
2287         if (!(p->flags & PAGE_VALID))
2288 return -1;
2289
2290 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2291 return -1;
2292 if (flags & PAGE_WRITE) {
2293 if (!(p->flags & PAGE_WRITE_ORG))
2294 return -1;
2295 /* unprotect the page if it was put read-only because it
2296 contains translated code */
2297 if (!(p->flags & PAGE_WRITE)) {
2298 if (!page_unprotect(addr, 0, NULL))
2299 return -1;
2300 }
2301 return 0;
2302 }
2303 }
2304 return 0;
2305 }
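
/* Illustrative sketch (editor's addition): validating a guest buffer before
 * touching it, in the style of the linux-user access_ok() helper.  The
 * helper name and the 'writable' flag are invented for the example.
 */
static int example_guest_buffer_ok(target_ulong guest_addr, target_ulong size,
                                   int writable)
{
    int flags = writable ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    /* 0 means every page in the range is mapped with the requested rights */
    return page_check_range(guest_addr, size, flags) == 0;
}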
2306
2307 /* called from signal handler: invalidate the code and unprotect the
2308 page. Return TRUE if the fault was successfully handled. */
2309 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2310 {
2311 unsigned int page_index, prot, pindex;
2312 PageDesc *p, *p1;
2313 target_ulong host_start, host_end, addr;
2314
2315 /* Technically this isn't safe inside a signal handler. However we
2316 know this only ever happens in a synchronous SEGV handler, so in
2317 practice it seems to be ok. */
2318 mmap_lock();
2319
2320 host_start = address & qemu_host_page_mask;
2321 page_index = host_start >> TARGET_PAGE_BITS;
2322 p1 = page_find(page_index);
2323 if (!p1) {
2324 mmap_unlock();
2325 return 0;
2326 }
2327 host_end = host_start + qemu_host_page_size;
2328 p = p1;
2329 prot = 0;
2330 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2331 prot |= p->flags;
2332 p++;
2333 }
2334 /* if the page was really writable, then we change its
2335 protection back to writable */
2336 if (prot & PAGE_WRITE_ORG) {
2337 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2338 if (!(p1[pindex].flags & PAGE_WRITE)) {
2339 mprotect((void *)g2h(host_start), qemu_host_page_size,
2340 (prot & PAGE_BITS) | PAGE_WRITE);
2341 p1[pindex].flags |= PAGE_WRITE;
2342 /* and since the content will be modified, we must invalidate
2343 the corresponding translated code. */
2344 tb_invalidate_phys_page(address, pc, puc);
2345 #ifdef DEBUG_TB_CHECK
2346 tb_invalidate_check(address);
2347 #endif
2348 mmap_unlock();
2349 return 1;
2350 }
2351 }
2352 mmap_unlock();
2353 return 0;
2354 }
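
/* Illustrative sketch (editor's addition): the shape of the SIGSEGV path
 * that ends up in page_unprotect() above (cf. handle_cpu_signal() in
 * cpu-exec.c).  h2g() converts the faulting host address back to a guest
 * address; the helper name here is invented.
 */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        return 1;   /* page was only write-protected for TBs: retry the insn */
    }
    return 0;       /* genuine guest fault: deliver the signal to the guest */
}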
2355
2356 static inline void tlb_set_dirty(CPUState *env,
2357 unsigned long addr, target_ulong vaddr)
2358 {
2359 }
2360 #endif /* defined(CONFIG_USER_ONLY) */
2361
2362 #if !defined(CONFIG_USER_ONLY)
2363
2364 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2365 typedef struct subpage_t {
2366 target_phys_addr_t base;
2367 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2368 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2369 void *opaque[TARGET_PAGE_SIZE][2][4];
2370 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2371 } subpage_t;
2372
2373 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2374 ram_addr_t memory, ram_addr_t region_offset);
2375 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2376 ram_addr_t orig_memory, ram_addr_t region_offset);
2377 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2378 need_subpage) \
2379 do { \
2380 if (addr > start_addr) \
2381 start_addr2 = 0; \
2382 else { \
2383 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2384 if (start_addr2 > 0) \
2385 need_subpage = 1; \
2386 } \
2387 \
2388 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2389 end_addr2 = TARGET_PAGE_SIZE - 1; \
2390 else { \
2391 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2392 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2393 need_subpage = 1; \
2394 } \
2395 } while (0)
2396
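/*
 * Worked example for CHECK_SUBPAGE (editor's addition), assuming 4 KB target
 * pages: registering orig_size = 0x200 bytes of MMIO at
 * start_addr = 0x10000100.  On the first (and only) loop iteration below,
 * addr == start_addr, so
 *     start_addr2 = 0x10000100 & ~TARGET_PAGE_MASK = 0x100  -> need_subpage
 *     (start_addr + orig_size) - addr = 0x200 < TARGET_PAGE_SIZE, so
 *     end_addr2   = (0x10000100 + 0x200 - 1) & ~TARGET_PAGE_MASK = 0x2ff
 * i.e. only the byte range [0x100, 0x2ff] of the page at 0x10000000 is
 * routed to the new handlers; the rest of that page keeps whatever it was
 * mapped to before (or stays unassigned) via the subpage machinery.
 */
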
2397 /* register physical memory.
2398 For RAM, 'size' must be a multiple of the target page size.
2399 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2400 io memory page. The address used when calling the IO function is
2401 the offset from the start of the region, plus region_offset. Both
2402 start_addr and region_offset are rounded down to a page boundary
2403 before calculating this offset. This should not be a problem unless
2404 the low bits of start_addr and region_offset differ. */
2405 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2406 ram_addr_t size,
2407 ram_addr_t phys_offset,
2408 ram_addr_t region_offset)
2409 {
2410 target_phys_addr_t addr, end_addr;
2411 PhysPageDesc *p;
2412 CPUState *env;
2413 ram_addr_t orig_size = size;
2414 void *subpage;
2415
2416 cpu_notify_set_memory(start_addr, size, phys_offset);
2417
2418 if (phys_offset == IO_MEM_UNASSIGNED) {
2419 region_offset = start_addr;
2420 }
2421 region_offset &= TARGET_PAGE_MASK;
2422 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2423 end_addr = start_addr + (target_phys_addr_t)size;
2424 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2427 ram_addr_t orig_memory = p->phys_offset;
2428 target_phys_addr_t start_addr2, end_addr2;
2429 int need_subpage = 0;
2430
2431 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2432 need_subpage);
2433 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2434 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2435 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2436 &p->phys_offset, orig_memory,
2437 p->region_offset);
2438 } else {
2439 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2440 >> IO_MEM_SHIFT];
2441 }
2442 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2443 region_offset);
2444 p->region_offset = 0;
2445 } else {
2446 p->phys_offset = phys_offset;
2447 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2448 (phys_offset & IO_MEM_ROMD))
2449 phys_offset += TARGET_PAGE_SIZE;
2450 }
2451 } else {
2452 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2453 p->phys_offset = phys_offset;
2454 p->region_offset = region_offset;
2455 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2456 (phys_offset & IO_MEM_ROMD)) {
2457 phys_offset += TARGET_PAGE_SIZE;
2458 } else {
2459 target_phys_addr_t start_addr2, end_addr2;
2460 int need_subpage = 0;
2461
2462 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2463 end_addr2, need_subpage);
2464
2465 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2466 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2467 &p->phys_offset, IO_MEM_UNASSIGNED,
2468 addr & TARGET_PAGE_MASK);
2469 subpage_register(subpage, start_addr2, end_addr2,
2470 phys_offset, region_offset);
2471 p->region_offset = 0;
2472 }
2473 }
2474 }
2475 region_offset += TARGET_PAGE_SIZE;
2476 }
2477
2478 /* since each CPU stores ram addresses in its TLB cache, we must
2479 reset the modified entries */
2480 /* XXX: slow ! */
2481 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2482 tlb_flush(env, 1);
2483 }
2484 }
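
/* Illustrative sketch (editor's addition): how board code typically maps a
 * block of guest RAM with the function above.  The 0x80000000 base and the
 * helper name are invented; region_offset 0 is the common case and is what
 * the cpu_register_physical_memory() wrapper passes.
 */
static void example_map_main_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* a phys_offset with clear low bits means plain RAM (IO_MEM_RAM == 0) */
    cpu_register_physical_memory_offset(0x80000000, ram_size,
                                        ram_offset | IO_MEM_RAM, 0);
}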
2485
2486 /* XXX: temporary until new memory mapping API */
2487 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2488 {
2489 PhysPageDesc *p;
2490
2491 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2492 if (!p)
2493 return IO_MEM_UNASSIGNED;
2494 return p->phys_offset;
2495 }
2496
2497 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2498 {
2499 if (kvm_enabled())
2500 kvm_coalesce_mmio_region(addr, size);
2501 }
2502
2503 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2504 {
2505 if (kvm_enabled())
2506 kvm_uncoalesce_mmio_region(addr, size);
2507 }
2508
2509 void qemu_flush_coalesced_mmio_buffer(void)
2510 {
2511 if (kvm_enabled())
2512 kvm_flush_coalesced_mmio_buffer();
2513 }
2514
2515 #if defined(__linux__) && !defined(TARGET_S390X)
2516
2517 #include <sys/vfs.h>
2518
2519 #define HUGETLBFS_MAGIC 0x958458f6
2520
2521 static long gethugepagesize(const char *path)
2522 {
2523 struct statfs fs;
2524 int ret;
2525
2526 do {
2527 ret = statfs(path, &fs);
2528 } while (ret != 0 && errno == EINTR);
2529
2530 if (ret != 0) {
2531 perror("statfs");
2532 return 0;
2533 }
2534
2535 if (fs.f_type != HUGETLBFS_MAGIC)
2536 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2537
2538 return fs.f_bsize;
2539 }
2540
2541 static void *file_ram_alloc(ram_addr_t memory, const char *path)
2542 {
2543 char *filename;
2544 void *area;
2545 int fd;
2546 #ifdef MAP_POPULATE
2547 int flags;
2548 #endif
2549 unsigned long hpagesize;
2550
2551 hpagesize = gethugepagesize(path);
2552 if (!hpagesize) {
2553 return NULL;
2554 }
2555
2556 if (memory < hpagesize) {
2557 return NULL;
2558 }
2559
2560 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2561 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2562 return NULL;
2563 }
2564
2565 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2566 return NULL;
2567 }
2568
2569 fd = mkstemp(filename);
2570 if (fd < 0) {
2571 perror("mkstemp");
2572 free(filename);
2573 return NULL;
2574 }
2575 unlink(filename);
2576 free(filename);
2577
2578 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2579
2580 /*
2581 * ftruncate is not supported by hugetlbfs in older
2582 * hosts, so don't bother bailing out on errors.
2583 * If anything goes wrong with it under other filesystems,
2584 * mmap will fail.
2585 */
2586 if (ftruncate(fd, memory))
2587 perror("ftruncate");
2588
2589 #ifdef MAP_POPULATE
2590     /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2591      * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2592 * to sidestep this quirk.
2593 */
2594 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2595 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2596 #else
2597 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2598 #endif
2599 if (area == MAP_FAILED) {
2600 perror("file_ram_alloc: can't mmap RAM pages");
2601 close(fd);
2602 return (NULL);
2603 }
2604 return area;
2605 }
2606 #endif
2607
2608 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2609 {
2610 RAMBlock *new_block;
2611
2612 size = TARGET_PAGE_ALIGN(size);
2613 new_block = qemu_malloc(sizeof(*new_block));
2614
2615 if (mem_path) {
2616 #if defined (__linux__) && !defined(TARGET_S390X)
2617 new_block->host = file_ram_alloc(size, mem_path);
2618 if (!new_block->host)
2619 exit(1);
2620 #else
2621 fprintf(stderr, "-mem-path option unsupported\n");
2622 exit(1);
2623 #endif
2624 } else {
2625 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2626 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2627 new_block->host = mmap((void*)0x1000000, size,
2628 PROT_EXEC|PROT_READ|PROT_WRITE,
2629 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2630 #else
2631 new_block->host = qemu_vmalloc(size);
2632 #endif
2633 #ifdef MADV_MERGEABLE
2634 madvise(new_block->host, size, MADV_MERGEABLE);
2635 #endif
2636 }
2637 new_block->offset = last_ram_offset;
2638 new_block->length = size;
2639
2640 new_block->next = ram_blocks;
2641 ram_blocks = new_block;
2642
2643 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2644 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2645 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2646 0xff, size >> TARGET_PAGE_BITS);
2647
2648 last_ram_offset += size;
2649
2650 if (kvm_enabled())
2651 kvm_setup_guest_memory(new_block->host, size);
2652
2653 return new_block->offset;
2654 }
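
/* Illustrative sketch (editor's addition): the usual allocation pattern for
 * device-owned memory such as video RAM.  The helper name is invented; see
 * the comment above qemu_get_ram_ptr() below for when keeping the raw host
 * pointer is acceptable.
 */
static uint8_t *example_alloc_vram(ram_addr_t size, ram_addr_t *offset)
{
    *offset = qemu_ram_alloc(size);    /* reserves space and dirty bitmap */
    return qemu_get_ram_ptr(*offset);  /* host pointer for the device model */
}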
2655
2656 void qemu_ram_free(ram_addr_t addr)
2657 {
2658 /* TODO: implement this. */
2659 }
2660
2661 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2662 With the exception of the softmmu code in this file, this should
2663 only be used for local memory (e.g. video ram) that the device owns,
2664 and knows it isn't going to access beyond the end of the block.
2665
2666 It should not be used for general purpose DMA.
2667 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2668 */
2669 void *qemu_get_ram_ptr(ram_addr_t addr)
2670 {
2671 RAMBlock *prev;
2672 RAMBlock **prevp;
2673 RAMBlock *block;
2674
2675 prev = NULL;
2676 prevp = &ram_blocks;
2677 block = ram_blocks;
2678 while (block && (block->offset > addr
2679 || block->offset + block->length <= addr)) {
2680 if (prev)
2681 prevp = &prev->next;
2682 prev = block;
2683 block = block->next;
2684 }
2685 if (!block) {
2686 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2687 abort();
2688 }
2689     /* Move this entry to the start of the list. */
2690 if (prev) {
2691 prev->next = block->next;
2692 block->next = *prevp;
2693 *prevp = block;
2694 }
2695 return block->host + (addr - block->offset);
2696 }
2697
2698 /* Some of the softmmu routines need to translate from a host pointer
2699 (typically a TLB entry) back to a ram offset. */
2700 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2701 {
2702 RAMBlock *prev;
2703 RAMBlock *block;
2704 uint8_t *host = ptr;
2705
2706 prev = NULL;
2707 block = ram_blocks;
2708 while (block && (block->host > host
2709 || block->host + block->length <= host)) {
2710 prev = block;
2711 block = block->next;
2712 }
2713 if (!block) {
2714 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2715 abort();
2716 }
2717 return block->offset + (host - block->host);
2718 }
2719
2720 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2721 {
2722 #ifdef DEBUG_UNASSIGNED
2723 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2724 #endif
2725 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2726 do_unassigned_access(addr, 0, 0, 0, 1);
2727 #endif
2728 return 0;
2729 }
2730
2731 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2732 {
2733 #ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2735 #endif
2736 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2737 do_unassigned_access(addr, 0, 0, 0, 2);
2738 #endif
2739 return 0;
2740 }
2741
2742 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2743 {
2744 #ifdef DEBUG_UNASSIGNED
2745 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2746 #endif
2747 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2748 do_unassigned_access(addr, 0, 0, 0, 4);
2749 #endif
2750 return 0;
2751 }
2752
2753 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2754 {
2755 #ifdef DEBUG_UNASSIGNED
2756 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2757 #endif
2758 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2759 do_unassigned_access(addr, 1, 0, 0, 1);
2760 #endif
2761 }
2762
2763 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2764 {
2765 #ifdef DEBUG_UNASSIGNED
2766 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2767 #endif
2768 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2769 do_unassigned_access(addr, 1, 0, 0, 2);
2770 #endif
2771 }
2772
2773 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2774 {
2775 #ifdef DEBUG_UNASSIGNED
2776 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2777 #endif
2778 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2779 do_unassigned_access(addr, 1, 0, 0, 4);
2780 #endif
2781 }
2782
2783 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2784 unassigned_mem_readb,
2785 unassigned_mem_readw,
2786 unassigned_mem_readl,
2787 };
2788
2789 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2790 unassigned_mem_writeb,
2791 unassigned_mem_writew,
2792 unassigned_mem_writel,
2793 };
2794
2795 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2796 uint32_t val)
2797 {
2798 int dirty_flags;
2799 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2800 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2801 #if !defined(CONFIG_USER_ONLY)
2802 tb_invalidate_phys_page_fast(ram_addr, 1);
2803 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2804 #endif
2805 }
2806 stb_p(qemu_get_ram_ptr(ram_addr), val);
2807 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2808 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2809 /* we remove the notdirty callback only if the code has been
2810 flushed */
2811 if (dirty_flags == 0xff)
2812 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2813 }
2814
2815 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2816 uint32_t val)
2817 {
2818 int dirty_flags;
2819 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2820 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2821 #if !defined(CONFIG_USER_ONLY)
2822 tb_invalidate_phys_page_fast(ram_addr, 2);
2823 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2824 #endif
2825 }
2826 stw_p(qemu_get_ram_ptr(ram_addr), val);
2827 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2828 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2829 /* we remove the notdirty callback only if the code has been
2830 flushed */
2831 if (dirty_flags == 0xff)
2832 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2833 }
2834
2835 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2836 uint32_t val)
2837 {
2838 int dirty_flags;
2839 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2840 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2841 #if !defined(CONFIG_USER_ONLY)
2842 tb_invalidate_phys_page_fast(ram_addr, 4);
2843 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2844 #endif
2845 }
2846 stl_p(qemu_get_ram_ptr(ram_addr), val);
2847 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2848 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2849 /* we remove the notdirty callback only if the code has been
2850 flushed */
2851 if (dirty_flags == 0xff)
2852 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2853 }
2854
2855 static CPUReadMemoryFunc * const error_mem_read[3] = {
2856 NULL, /* never used */
2857 NULL, /* never used */
2858 NULL, /* never used */
2859 };
2860
2861 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2862 notdirty_mem_writeb,
2863 notdirty_mem_writew,
2864 notdirty_mem_writel,
2865 };
2866
2867 /* Generate a debug exception if a watchpoint has been hit. */
2868 static void check_watchpoint(int offset, int len_mask, int flags)
2869 {
2870 CPUState *env = cpu_single_env;
2871 target_ulong pc, cs_base;
2872 TranslationBlock *tb;
2873 target_ulong vaddr;
2874 CPUWatchpoint *wp;
2875 int cpu_flags;
2876
2877 if (env->watchpoint_hit) {
2878 /* We re-entered the check after replacing the TB. Now raise
2879          * the debug interrupt so that it will trigger after the
2880 * current instruction. */
2881 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2882 return;
2883 }
2884 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2885 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2886 if ((vaddr == (wp->vaddr & len_mask) ||
2887 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2888 wp->flags |= BP_WATCHPOINT_HIT;
2889 if (!env->watchpoint_hit) {
2890 env->watchpoint_hit = wp;
2891 tb = tb_find_pc(env->mem_io_pc);
2892 if (!tb) {
2893 cpu_abort(env, "check_watchpoint: could not find TB for "
2894 "pc=%p", (void *)env->mem_io_pc);
2895 }
2896 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2897 tb_phys_invalidate(tb, -1);
2898 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2899 env->exception_index = EXCP_DEBUG;
2900 } else {
2901 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2902 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2903 }
2904 cpu_resume_from_signal(env, NULL);
2905 }
2906 } else {
2907 wp->flags &= ~BP_WATCHPOINT_HIT;
2908 }
2909 }
2910 }
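
/* Illustrative sketch (editor's addition): how a watchpoint ends up on
 * env->watchpoints so that check_watchpoint() above can fire.  It assumes
 * the cpu_watchpoint_insert() helper declared in cpu-all.h (the gdbstub is
 * the usual caller); the wrapper name is invented.
 */
static int example_watch_writes(CPUState *env, target_ulong vaddr,
                                target_ulong len)
{
    CPUWatchpoint *wp;

    /* once the page's TLB entry is refilled, tlb_set_page_exec() routes
       accesses through io_mem_watch and the checks above */
    return cpu_watchpoint_insert(env, vaddr, len, BP_MEM_WRITE | BP_GDB, &wp);
}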
2911
2912 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2913 so these check for a hit then pass through to the normal out-of-line
2914 phys routines. */
2915 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2916 {
2917 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2918 return ldub_phys(addr);
2919 }
2920
2921 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2922 {
2923 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2924 return lduw_phys(addr);
2925 }
2926
2927 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2928 {
2929 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2930 return ldl_phys(addr);
2931 }
2932
2933 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2934 uint32_t val)
2935 {
2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2937 stb_phys(addr, val);
2938 }
2939
2940 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2941 uint32_t val)
2942 {
2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2944 stw_phys(addr, val);
2945 }
2946
2947 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2948 uint32_t val)
2949 {
2950 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2951 stl_phys(addr, val);
2952 }
2953
2954 static CPUReadMemoryFunc * const watch_mem_read[3] = {
2955 watch_mem_readb,
2956 watch_mem_readw,
2957 watch_mem_readl,
2958 };
2959
2960 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2961 watch_mem_writeb,
2962 watch_mem_writew,
2963 watch_mem_writel,
2964 };
2965
2966 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2967 unsigned int len)
2968 {
2969 uint32_t ret;
2970 unsigned int idx;
2971
2972 idx = SUBPAGE_IDX(addr);
2973 #if defined(DEBUG_SUBPAGE)
2974 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2975 mmio, len, addr, idx);
2976 #endif
2977 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2978 addr + mmio->region_offset[idx][0][len]);
2979
2980 return ret;
2981 }
2982
2983 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2984 uint32_t value, unsigned int len)
2985 {
2986 unsigned int idx;
2987
2988 idx = SUBPAGE_IDX(addr);
2989 #if defined(DEBUG_SUBPAGE)
2990 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2991 mmio, len, addr, idx, value);
2992 #endif
2993 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2994 addr + mmio->region_offset[idx][1][len],
2995 value);
2996 }
2997
2998 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2999 {
3000 #if defined(DEBUG_SUBPAGE)
3001 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3002 #endif
3003
3004 return subpage_readlen(opaque, addr, 0);
3005 }
3006
3007 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3008 uint32_t value)
3009 {
3010 #if defined(DEBUG_SUBPAGE)
3011 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3012 #endif
3013 subpage_writelen(opaque, addr, value, 0);
3014 }
3015
3016 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3017 {
3018 #if defined(DEBUG_SUBPAGE)
3019 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3020 #endif
3021
3022 return subpage_readlen(opaque, addr, 1);
3023 }
3024
3025 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3026 uint32_t value)
3027 {
3028 #if defined(DEBUG_SUBPAGE)
3029 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3030 #endif
3031 subpage_writelen(opaque, addr, value, 1);
3032 }
3033
3034 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3035 {
3036 #if defined(DEBUG_SUBPAGE)
3037 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3038 #endif
3039
3040 return subpage_readlen(opaque, addr, 2);
3041 }
3042
3043 static void subpage_writel (void *opaque,
3044 target_phys_addr_t addr, uint32_t value)
3045 {
3046 #if defined(DEBUG_SUBPAGE)
3047 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3048 #endif
3049 subpage_writelen(opaque, addr, value, 2);
3050 }
3051
3052 static CPUReadMemoryFunc * const subpage_read[] = {
3053 &subpage_readb,
3054 &subpage_readw,
3055 &subpage_readl,
3056 };
3057
3058 static CPUWriteMemoryFunc * const subpage_write[] = {
3059 &subpage_writeb,
3060 &subpage_writew,
3061 &subpage_writel,
3062 };
3063
3064 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3065 ram_addr_t memory, ram_addr_t region_offset)
3066 {
3067 int idx, eidx;
3068 unsigned int i;
3069
3070 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3071 return -1;
3072 idx = SUBPAGE_IDX(start);
3073 eidx = SUBPAGE_IDX(end);
3074 #if defined(DEBUG_SUBPAGE)
3075 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3076 mmio, start, end, idx, eidx, memory);
3077 #endif
3078 memory >>= IO_MEM_SHIFT;
3079 for (; idx <= eidx; idx++) {
3080 for (i = 0; i < 4; i++) {
3081 if (io_mem_read[memory][i]) {
3082 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3083 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3084 mmio->region_offset[idx][0][i] = region_offset;
3085 }
3086 if (io_mem_write[memory][i]) {
3087 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3088 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3089 mmio->region_offset[idx][1][i] = region_offset;
3090 }
3091 }
3092 }
3093
3094 return 0;
3095 }
3096
3097 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3098 ram_addr_t orig_memory, ram_addr_t region_offset)
3099 {
3100 subpage_t *mmio;
3101 int subpage_memory;
3102
3103 mmio = qemu_mallocz(sizeof(subpage_t));
3104
3105 mmio->base = base;
3106 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3107 #if defined(DEBUG_SUBPAGE)
3108 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3109 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3110 #endif
3111 *phys = subpage_memory | IO_MEM_SUBPAGE;
3112 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3113 region_offset);
3114
3115 return mmio;
3116 }
3117
3118 static int get_free_io_mem_idx(void)
3119 {
3120 int i;
3121
3122 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3123 if (!io_mem_used[i]) {
3124 io_mem_used[i] = 1;
3125 return i;
3126 }
3127     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3128 return -1;
3129 }
3130
3131 /* mem_read and mem_write are arrays of functions containing the
3132 function to access byte (index 0), word (index 1) and dword (index
3133 2). Functions can be omitted with a NULL function pointer.
3134    If io_index is nonzero, the corresponding io zone is
3135    modified. If it is zero, a new io zone is allocated. The return
3136    value can be used with cpu_register_physical_memory(). (-1) is
3137    returned on error. */
3138 static int cpu_register_io_memory_fixed(int io_index,
3139 CPUReadMemoryFunc * const *mem_read,
3140 CPUWriteMemoryFunc * const *mem_write,
3141 void *opaque)
3142 {
3143 int i, subwidth = 0;
3144
3145 if (io_index <= 0) {
3146 io_index = get_free_io_mem_idx();
3147 if (io_index == -1)
3148 return io_index;
3149 } else {
3150 io_index >>= IO_MEM_SHIFT;
3151 if (io_index >= IO_MEM_NB_ENTRIES)
3152 return -1;
3153 }
3154
3155 for(i = 0;i < 3; i++) {
3156 if (!mem_read[i] || !mem_write[i])
3157 subwidth = IO_MEM_SUBWIDTH;
3158 io_mem_read[io_index][i] = mem_read[i];
3159 io_mem_write[io_index][i] = mem_write[i];
3160 }
3161 io_mem_opaque[io_index] = opaque;
3162 return (io_index << IO_MEM_SHIFT) | subwidth;
3163 }
3164
3165 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3166 CPUWriteMemoryFunc * const *mem_write,
3167 void *opaque)
3168 {
3169 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3170 }
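
/* Illustrative sketch (editor's addition): registering a trivial MMIO region
 * with cpu_register_io_memory() above and mapping it into the physical
 * address space.  The device, its single register and the 0xfe000000 base
 * are invented for the example; only 32-bit accesses are implemented, so the
 * returned token carries IO_MEM_SUBWIDTH and the region is routed through
 * the subpage path.
 */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                    /* one read-only 32-bit register */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* writes are ignored in this sketch */
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,        /* byte, word, dword handlers */
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_map(void)
{
    int io = cpu_register_io_memory(example_dev_read, example_dev_write, NULL);

    cpu_register_physical_memory_offset(0xfe000000, TARGET_PAGE_SIZE, io, 0);
}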
3171
3172 void cpu_unregister_io_memory(int io_table_address)
3173 {
3174 int i;
3175 int io_index = io_table_address >> IO_MEM_SHIFT;
3176
3177 for (i=0;i < 3; i++) {
3178 io_mem_read[io_index][i] = unassigned_mem_read[i];
3179 io_mem_write[io_index][i] = unassigned_mem_write[i];
3180 }
3181 io_mem_opaque[io_index] = NULL;
3182 io_mem_used[io_index] = 0;
3183 }
3184
3185 static void io_mem_init(void)
3186 {
3187 int i;
3188
3189 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3190 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3191 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3192 for (i=0; i<5; i++)
3193 io_mem_used[i] = 1;
3194
3195 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3196 watch_mem_write, NULL);
3197 }
3198
3199 #endif /* !defined(CONFIG_USER_ONLY) */
3200
3201 /* physical memory access (slow version, mainly for debug) */
3202 #if defined(CONFIG_USER_ONLY)
3203 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3204 uint8_t *buf, int len, int is_write)
3205 {
3206 int l, flags;
3207 target_ulong page;
3208 void * p;
3209
3210 while (len > 0) {
3211 page = addr & TARGET_PAGE_MASK;
3212 l = (page + TARGET_PAGE_SIZE) - addr;
3213 if (l > len)
3214 l = len;
3215 flags = page_get_flags(page);
3216 if (!(flags & PAGE_VALID))
3217 return -1;
3218 if (is_write) {
3219 if (!(flags & PAGE_WRITE))
3220 return -1;
3221 /* XXX: this code should not depend on lock_user */
3222 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3223 return -1;
3224 memcpy(p, buf, l);
3225 unlock_user(p, addr, l);
3226 } else {
3227 if (!(flags & PAGE_READ))
3228 return -1;
3229 /* XXX: this code should not depend on lock_user */
3230 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3231 return -1;
3232 memcpy(buf, p, l);
3233 unlock_user(p, addr, 0);
3234 }
3235 len -= l;
3236 buf += l;
3237 addr += l;
3238 }
3239 return 0;
3240 }
3241
3242 #else
3243 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3244 int len, int is_write)
3245 {
3246 int l, io_index;
3247 uint8_t *ptr;
3248 uint32_t val;
3249 target_phys_addr_t page;
3250 unsigned long pd;
3251 PhysPageDesc *p;
3252
3253 while (len > 0) {
3254 page = addr & TARGET_PAGE_MASK;
3255 l = (page + TARGET_PAGE_SIZE) - addr;
3256 if (l > len)
3257 l = len;
3258 p = phys_page_find(page >> TARGET_PAGE_BITS);
3259 if (!p) {
3260 pd = IO_MEM_UNASSIGNED;
3261 } else {
3262 pd = p->phys_offset;
3263 }
3264
3265 if (is_write) {
3266 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3267 target_phys_addr_t addr1 = addr;
3268 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3269 if (p)
3270 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3271 /* XXX: could force cpu_single_env to NULL to avoid
3272 potential bugs */
3273 if (l >= 4 && ((addr1 & 3) == 0)) {
3274 /* 32 bit write access */
3275 val = ldl_p(buf);
3276 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3277 l = 4;
3278 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3279 /* 16 bit write access */
3280 val = lduw_p(buf);
3281 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3282 l = 2;
3283 } else {
3284 /* 8 bit write access */
3285 val = ldub_p(buf);
3286 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3287 l = 1;
3288 }
3289 } else {
3290 unsigned long addr1;
3291 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3292 /* RAM case */
3293 ptr = qemu_get_ram_ptr(addr1);
3294 memcpy(ptr, buf, l);
3295 if (!cpu_physical_memory_is_dirty(addr1)) {
3296 /* invalidate code */
3297 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3298 /* set dirty bit */
3299 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3300 (0xff & ~CODE_DIRTY_FLAG);
3301 }
3302 }
3303 } else {
3304 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3305 !(pd & IO_MEM_ROMD)) {
3306 target_phys_addr_t addr1 = addr;
3307 /* I/O case */
3308 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3309 if (p)
3310 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3311 if (l >= 4 && ((addr1 & 3) == 0)) {
3312 /* 32 bit read access */
3313 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3314 stl_p(buf, val);
3315 l = 4;
3316 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3317 /* 16 bit read access */
3318 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3319 stw_p(buf, val);
3320 l = 2;
3321 } else {
3322 /* 8 bit read access */
3323 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3324 stb_p(buf, val);
3325 l = 1;
3326 }
3327 } else {
3328 /* RAM case */
3329 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3330 (addr & ~TARGET_PAGE_MASK);
3331 memcpy(buf, ptr, l);
3332 }
3333 }
3334 len -= l;
3335 buf += l;
3336 addr += l;
3337 }
3338 }
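
/* Illustrative sketch (editor's addition): a device performing a small DMA
 * copy through the slow path above.  cpu_physical_memory_read/write are the
 * thin wrappers around cpu_physical_memory_rw(); the helper name and the
 * 64-byte scratch size are invented.
 */
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t tmp[64];

    while (len > 0) {
        int l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);

        cpu_physical_memory_read(src, tmp, l);    /* guest RAM/MMIO -> tmp */
        cpu_physical_memory_write(dst, tmp, l);   /* tmp -> guest RAM/MMIO */
        src += l;
        dst += l;
        len -= l;
    }
}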
3339
3340 /* used for ROM loading : can write in RAM and ROM */
3341 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3342 const uint8_t *buf, int len)
3343 {
3344 int l;
3345 uint8_t *ptr;
3346 target_phys_addr_t page;
3347 unsigned long pd;
3348 PhysPageDesc *p;
3349
3350 while (len > 0) {
3351 page = addr & TARGET_PAGE_MASK;
3352 l = (page + TARGET_PAGE_SIZE) - addr;
3353 if (l > len)
3354 l = len;
3355 p = phys_page_find(page >> TARGET_PAGE_BITS);
3356 if (!p) {
3357 pd = IO_MEM_UNASSIGNED;
3358 } else {
3359 pd = p->phys_offset;
3360 }
3361
3362 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3363 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3364 !(pd & IO_MEM_ROMD)) {
3365 /* do nothing */
3366 } else {
3367 unsigned long addr1;
3368 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3369 /* ROM/RAM case */
3370 ptr = qemu_get_ram_ptr(addr1);
3371 memcpy(ptr, buf, l);
3372 }
3373 len -= l;
3374 buf += l;
3375 addr += l;
3376 }
3377 }
3378
3379 typedef struct {
3380 void *buffer;
3381 target_phys_addr_t addr;
3382 target_phys_addr_t len;
3383 } BounceBuffer;
3384
3385 static BounceBuffer bounce;
3386
3387 typedef struct MapClient {
3388 void *opaque;
3389 void (*callback)(void *opaque);
3390 QLIST_ENTRY(MapClient) link;
3391 } MapClient;
3392
3393 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3394 = QLIST_HEAD_INITIALIZER(map_client_list);
3395
3396 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3397 {
3398 MapClient *client = qemu_malloc(sizeof(*client));
3399
3400 client->opaque = opaque;
3401 client->callback = callback;
3402 QLIST_INSERT_HEAD(&map_client_list, client, link);
3403 return client;
3404 }
3405
3406 void cpu_unregister_map_client(void *_client)
3407 {
3408 MapClient *client = (MapClient *)_client;
3409
3410 QLIST_REMOVE(client, link);
3411 qemu_free(client);
3412 }
3413
3414 static void cpu_notify_map_clients(void)
3415 {
3416 MapClient *client;
3417
3418 while (!QLIST_EMPTY(&map_client_list)) {
3419 client = QLIST_FIRST(&map_client_list);
3420 client->callback(client->opaque);
3421 cpu_unregister_map_client(client);
3422 }
3423 }
3424
3425 /* Map a physical memory region into a host virtual address.
3426 * May map a subset of the requested range, given by and returned in *plen.
3427 * May return NULL if resources needed to perform the mapping are exhausted.
3428 * Use only for reads OR writes - not for read-modify-write operations.
3429 * Use cpu_register_map_client() to know when retrying the map operation is
3430 * likely to succeed.
3431 */
3432 void *cpu_physical_memory_map(target_phys_addr_t addr,
3433 target_phys_addr_t *plen,
3434 int is_write)
3435 {
3436 target_phys_addr_t len = *plen;
3437 target_phys_addr_t done = 0;
3438 int l;
3439 uint8_t *ret = NULL;
3440 uint8_t *ptr;
3441 target_phys_addr_t page;
3442 unsigned long pd;
3443 PhysPageDesc *p;
3444 unsigned long addr1;
3445
3446 while (len > 0) {
3447 page = addr & TARGET_PAGE_MASK;
3448 l = (page + TARGET_PAGE_SIZE) - addr;
3449 if (l > len)
3450 l = len;
3451 p = phys_page_find(page >> TARGET_PAGE_BITS);
3452 if (!p) {
3453 pd = IO_MEM_UNASSIGNED;
3454 } else {
3455 pd = p->phys_offset;
3456 }
3457
3458 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3459 if (done || bounce.buffer) {
3460 break;
3461 }
3462 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3463 bounce.addr = addr;
3464 bounce.len = l;
3465 if (!is_write) {
3466 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3467 }
3468 ptr = bounce.buffer;
3469 } else {
3470 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3471 ptr = qemu_get_ram_ptr(addr1);
3472 }
3473 if (!done) {
3474 ret = ptr;
3475 } else if (ret + done != ptr) {
3476 break;
3477 }
3478
3479 len -= l;
3480 addr += l;
3481 done += l;
3482 }
3483 *plen = done;
3484 return ret;
3485 }
3486
3487 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3488 * Will also mark the memory as dirty if is_write == 1. access_len gives
3489 * the amount of memory that was actually read or written by the caller.
3490 */
3491 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3492 int is_write, target_phys_addr_t access_len)
3493 {
3494 if (buffer != bounce.buffer) {
3495 if (is_write) {
3496 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3497 while (access_len) {
3498 unsigned l;
3499 l = TARGET_PAGE_SIZE;
3500 if (l > access_len)
3501 l = access_len;
3502 if (!cpu_physical_memory_is_dirty(addr1)) {
3503 /* invalidate code */
3504 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3505 /* set dirty bit */
3506 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3507 (0xff & ~CODE_DIRTY_FLAG);
3508 }
3509 addr1 += l;
3510 access_len -= l;
3511 }
3512 }
3513 return;
3514 }
3515 if (is_write) {
3516 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3517 }
3518 qemu_vfree(bounce.buffer);
3519 bounce.buffer = NULL;
3520 cpu_notify_map_clients();
3521 }
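
/* Illustrative sketch (editor's addition): the zero-copy pattern the two
 * functions above are meant for.  Falling back to cpu_physical_memory_rw()
 * when the mapping comes back short (or NULL, once the single bounce buffer
 * is busy) is the caller's job; the helper name is invented.
 */
static void example_dma_write_zero_copy(target_phys_addr_t addr,
                                        const uint8_t *data,
                                        target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (host && plen == len) {
        memcpy(host, data, len);                        /* direct host copy */
        cpu_physical_memory_unmap(host, plen, 1, len);  /* marks pages dirty */
    } else {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        cpu_physical_memory_write(addr, data, (int)len);  /* slow path */
    }
}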
3522
3523 /* warning: addr must be aligned */
3524 uint32_t ldl_phys(target_phys_addr_t addr)
3525 {
3526 int io_index;
3527 uint8_t *ptr;
3528 uint32_t val;
3529 unsigned long pd;
3530 PhysPageDesc *p;
3531
3532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3533 if (!p) {
3534 pd = IO_MEM_UNASSIGNED;
3535 } else {
3536 pd = p->phys_offset;
3537 }
3538
3539 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3540 !(pd & IO_MEM_ROMD)) {
3541 /* I/O case */
3542 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3543 if (p)
3544 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3545 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3546 } else {
3547 /* RAM case */
3548 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3549 (addr & ~TARGET_PAGE_MASK);
3550 val = ldl_p(ptr);
3551 }
3552 return val;
3553 }
3554
3555 /* warning: addr must be aligned */
3556 uint64_t ldq_phys(target_phys_addr_t addr)
3557 {
3558 int io_index;
3559 uint8_t *ptr;
3560 uint64_t val;
3561 unsigned long pd;
3562 PhysPageDesc *p;
3563
3564 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3565 if (!p) {
3566 pd = IO_MEM_UNASSIGNED;
3567 } else {
3568 pd = p->phys_offset;
3569 }
3570
3571 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3572 !(pd & IO_MEM_ROMD)) {
3573 /* I/O case */
3574 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3575 if (p)
3576 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3577 #ifdef TARGET_WORDS_BIGENDIAN
3578 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3579 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3580 #else
3581 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3582 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3583 #endif
3584 } else {
3585 /* RAM case */
3586 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3587 (addr & ~TARGET_PAGE_MASK);
3588 val = ldq_p(ptr);
3589 }
3590 return val;
3591 }
3592
3593 /* XXX: optimize */
3594 uint32_t ldub_phys(target_phys_addr_t addr)
3595 {
3596 uint8_t val;
3597 cpu_physical_memory_read(addr, &val, 1);
3598 return val;
3599 }
3600
3601 /* XXX: optimize */
3602 uint32_t lduw_phys(target_phys_addr_t addr)
3603 {
3604 uint16_t val;
3605 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3606 return tswap16(val);
3607 }
3608
3609 /* warning: addr must be aligned. The ram page is not marked as dirty
3610 and the code inside is not invalidated. It is useful if the dirty
3611 bits are used to track modified PTEs */
3612 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3613 {
3614 int io_index;
3615 uint8_t *ptr;
3616 unsigned long pd;
3617 PhysPageDesc *p;
3618
3619 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3620 if (!p) {
3621 pd = IO_MEM_UNASSIGNED;
3622 } else {
3623 pd = p->phys_offset;
3624 }
3625
3626 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3627 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3628 if (p)
3629 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3630 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3631 } else {
3632 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3633 ptr = qemu_get_ram_ptr(addr1);
3634 stl_p(ptr, val);
3635
3636 if (unlikely(in_migration)) {
3637 if (!cpu_physical_memory_is_dirty(addr1)) {
3638 /* invalidate code */
3639 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3640 /* set dirty bit */
3641 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3642 (0xff & ~CODE_DIRTY_FLAG);
3643 }
3644 }
3645 }
3646 }
3647
3648 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3649 {
3650 int io_index;
3651 uint8_t *ptr;
3652 unsigned long pd;
3653 PhysPageDesc *p;
3654
3655 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3656 if (!p) {
3657 pd = IO_MEM_UNASSIGNED;
3658 } else {
3659 pd = p->phys_offset;
3660 }
3661
3662 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3663 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3664 if (p)
3665 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3666 #ifdef TARGET_WORDS_BIGENDIAN
3667 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3668 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3669 #else
3670 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3671 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3672 #endif
3673 } else {
3674 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3675 (addr & ~TARGET_PAGE_MASK);
3676 stq_p(ptr, val);
3677 }
3678 }
3679
3680 /* warning: addr must be aligned */
3681 void stl_phys(target_phys_addr_t addr, uint32_t val)
3682 {
3683 int io_index;
3684 uint8_t *ptr;
3685 unsigned long pd;
3686 PhysPageDesc *p;
3687
3688 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3689 if (!p) {
3690 pd = IO_MEM_UNASSIGNED;
3691 } else {
3692 pd = p->phys_offset;
3693 }
3694
3695 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3696 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3697 if (p)
3698 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3699 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3700 } else {
3701 unsigned long addr1;
3702 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3703 /* RAM case */
3704 ptr = qemu_get_ram_ptr(addr1);
3705 stl_p(ptr, val);
3706 if (!cpu_physical_memory_is_dirty(addr1)) {
3707 /* invalidate code */
3708 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3709 /* set dirty bit */
3710 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3711 (0xff & ~CODE_DIRTY_FLAG);
3712 }
3713 }
3714 }
3715
3716 /* XXX: optimize */
3717 void stb_phys(target_phys_addr_t addr, uint32_t val)
3718 {
3719 uint8_t v = val;
3720 cpu_physical_memory_write(addr, &v, 1);
3721 }
3722
3723 /* XXX: optimize */
3724 void stw_phys(target_phys_addr_t addr, uint32_t val)
3725 {
3726 uint16_t v = tswap16(val);
3727 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3728 }
3729
3730 /* XXX: optimize */
3731 void stq_phys(target_phys_addr_t addr, uint64_t val)
3732 {
3733 val = tswap64(val);
3734 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3735 }
3736
3737 /* virtual memory access for debug (includes writing to ROM) */
3738 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3739 uint8_t *buf, int len, int is_write)
3740 {
3741 int l;
3742 target_phys_addr_t phys_addr;
3743 target_ulong page;
3744
3745 while (len > 0) {
3746 page = addr & TARGET_PAGE_MASK;
3747 phys_addr = cpu_get_phys_page_debug(env, page);
3748 /* if no physical page mapped, return an error */
3749 if (phys_addr == -1)
3750 return -1;
3751 l = (page + TARGET_PAGE_SIZE) - addr;
3752 if (l > len)
3753 l = len;
3754 phys_addr += (addr & ~TARGET_PAGE_MASK);
3755 if (is_write)
3756 cpu_physical_memory_write_rom(phys_addr, buf, l);
3757 else
3758 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3759 len -= l;
3760 buf += l;
3761 addr += l;
3762 }
3763 return 0;
3764 }
3765 #endif
3766
3767 /* in deterministic execution mode, instructions doing device I/Os
3768 must be at the end of the TB */
3769 void cpu_io_recompile(CPUState *env, void *retaddr)
3770 {
3771 TranslationBlock *tb;
3772 uint32_t n, cflags;
3773 target_ulong pc, cs_base;
3774 uint64_t flags;
3775
3776 tb = tb_find_pc((unsigned long)retaddr);
3777 if (!tb) {
3778 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3779 retaddr);
3780 }
3781 n = env->icount_decr.u16.low + tb->icount;
3782 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3783 /* Calculate how many instructions had been executed before the fault
3784 occurred. */
3785 n = n - env->icount_decr.u16.low;
3786 /* Generate a new TB ending on the I/O insn. */
3787 n++;
3788 /* On MIPS and SH, delay slot instructions can only be restarted if
3789 they were already the first instruction in the TB. If this is not
3790 the first instruction in a TB then re-execute the preceding
3791 branch. */
3792 #if defined(TARGET_MIPS)
3793 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3794 env->active_tc.PC -= 4;
3795 env->icount_decr.u16.low++;
3796 env->hflags &= ~MIPS_HFLAG_BMASK;
3797 }
3798 #elif defined(TARGET_SH4)
3799 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3800 && n > 1) {
3801 env->pc -= 2;
3802 env->icount_decr.u16.low++;
3803 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3804 }
3805 #endif
3806 /* This should never happen. */
3807 if (n > CF_COUNT_MASK)
3808 cpu_abort(env, "TB too big during recompile");
3809
3810 cflags = n | CF_LAST_IO;
3811 pc = tb->pc;
3812 cs_base = tb->cs_base;
3813 flags = tb->flags;
3814 tb_phys_invalidate(tb, -1);
3815 /* FIXME: In theory this could raise an exception. In practice
3816 we have already translated the block once so it's probably ok. */
3817 tb_gen_code(env, pc, cs_base, flags, cflags);
3818 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3819 the first in the TB) then we end up generating a whole new TB and
3820 repeating the fault, which is horribly inefficient.
3821 Better would be to execute just this insn uncached, or generate a
3822 second new TB. */
3823 cpu_resume_from_signal(env, NULL);
3824 }
3825
3826 void dump_exec_info(FILE *f,
3827 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3828 {
3829 int i, target_code_size, max_target_code_size;
3830 int direct_jmp_count, direct_jmp2_count, cross_page;
3831 TranslationBlock *tb;
3832
3833 target_code_size = 0;
3834 max_target_code_size = 0;
3835 cross_page = 0;
3836 direct_jmp_count = 0;
3837 direct_jmp2_count = 0;
3838 for(i = 0; i < nb_tbs; i++) {
3839 tb = &tbs[i];
3840 target_code_size += tb->size;
3841 if (tb->size > max_target_code_size)
3842 max_target_code_size = tb->size;
3843 if (tb->page_addr[1] != -1)
3844 cross_page++;
3845 if (tb->tb_next_offset[0] != 0xffff) {
3846 direct_jmp_count++;
3847 if (tb->tb_next_offset[1] != 0xffff) {
3848 direct_jmp2_count++;
3849 }
3850 }
3851 }
3852 /* XXX: avoid using doubles ? */
3853 cpu_fprintf(f, "Translation buffer state:\n");
3854 cpu_fprintf(f, "gen code size %ld/%ld\n",
3855 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3856 cpu_fprintf(f, "TB count %d/%d\n",
3857 nb_tbs, code_gen_max_blocks);
3858 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3859 nb_tbs ? target_code_size / nb_tbs : 0,
3860 max_target_code_size);
3861 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3862 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3863 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3864 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3865 cross_page,
3866 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3867 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3868 direct_jmp_count,
3869 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3870 direct_jmp2_count,
3871 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3872 cpu_fprintf(f, "\nStatistics:\n");
3873 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3874 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3875 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3876 tcg_dump_info(f, cpu_fprintf);
3877 }
3878
3879 #if !defined(CONFIG_USER_ONLY)
3880
3881 #define MMUSUFFIX _cmmu
3882 #define GETPC() NULL
3883 #define env cpu_single_env
3884 #define SOFTMMU_CODE_ACCESS
3885
3886 #define SHIFT 0
3887 #include "softmmu_template.h"
3888
3889 #define SHIFT 1
3890 #include "softmmu_template.h"
3891
3892 #define SHIFT 2
3893 #include "softmmu_template.h"
3894
3895 #define SHIFT 3
3896 #include "softmmu_template.h"
3897
3898 #undef env
3899
3900 #endif