1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
59
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
62
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
67
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
71
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
74
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
79
80 #define SMC_BITMAP_USE_THRESHOLD 10
81
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
115
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
120
121 #endif
122
123 CPUState *first_cpu;
124 /* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
126 DEFINE_TLS(CPUState *,cpu_single_env);
127 /* 0 = Do not count executed instructions.
128 1 = Precise instruction counting.
129 2 = Adaptive rate instruction counting. */
130 int use_icount = 0;
131
132 typedef struct PageDesc {
133 /* list of TBs intersecting this ram page */
134 TranslationBlock *first_tb;
135 /* in order to optimize self modifying code, we count the number
136 of code write accesses to a given page before switching to a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139 #if defined(CONFIG_USER_ONLY)
140 unsigned long flags;
141 #endif
142 } PageDesc;
143
144 /* In system mode we want L1_MAP to be based on ram offsets,
145 while in user mode we want it to be based on virtual addresses. */
146 #if !defined(CONFIG_USER_ONLY)
147 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
149 #else
150 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
151 #endif
152 #else
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
154 #endif
155
156 /* Size of the L2 (and L3, etc) page tables. */
157 #define L2_BITS 10
158 #define L2_SIZE (1 << L2_BITS)
159
160 /* The bits remaining after N lower levels of page tables. */
161 #define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163 #define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165
166 /* Size of the L1 page table. Avoid silly small sizes. */
167 #if P_L1_BITS_REM < 4
168 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
169 #else
170 #define P_L1_BITS P_L1_BITS_REM
171 #endif
172
173 #if V_L1_BITS_REM < 4
174 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
175 #else
176 #define V_L1_BITS V_L1_BITS_REM
177 #endif
178
179 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181
182 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
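/* For illustration only, a worked example of the geometry above, assuming
   TARGET_PAGE_BITS == 12 and L1_MAP_ADDR_SPACE_BITS == 32 (a typical 32-bit
   user-mode configuration; the real values depend on the target):

     V_L1_BITS_REM = (32 - 12) % 10 = 0      (less than 4, so widen)
     V_L1_BITS     = 0 + 10       = 10
     V_L1_SIZE     = 1 << 10      = 1024 entries in l1_map
     V_L1_SHIFT    = 32 - 12 - 10 = 10

   A page index is thus split as [10 bits | 10 bits]: the top 10 bits pick an
   l1_map slot and the remaining 10 bits index the leaf array of PageDesc
   allocated lazily by page_find_alloc() below. */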
184
185 unsigned long qemu_real_host_page_size;
186 unsigned long qemu_host_page_size;
187 unsigned long qemu_host_page_mask;
188
189 /* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191 static void *l1_map[V_L1_SIZE];
192
193 #if !defined(CONFIG_USER_ONLY)
194 typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198 } PhysPageDesc;
199
200 /* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202 static void *l1_phys_map[P_L1_SIZE];
203
204 static void io_mem_init(void);
205 static void memory_map_init(void);
206
207 /* io memory support */
208 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
209 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
210 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
211 static char io_mem_used[IO_MEM_NB_ENTRIES];
212 static int io_mem_watch;
213 #endif
214
215 /* log support */
216 #ifdef WIN32
217 static const char *logfilename = "qemu.log";
218 #else
219 static const char *logfilename = "/tmp/qemu.log";
220 #endif
221 FILE *logfile;
222 int loglevel;
223 static int log_append = 0;
224
225 /* statistics */
226 #if !defined(CONFIG_USER_ONLY)
227 static int tlb_flush_count;
228 #endif
229 static int tb_flush_count;
230 static int tb_phys_invalidate_count;
231
232 #ifdef _WIN32
233 static void map_exec(void *addr, long size)
234 {
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239 }
240 #else
241 static void map_exec(void *addr, long size)
242 {
243 unsigned long start, end, page_size;
244
245 page_size = getpagesize();
246 start = (unsigned long)addr;
247 start &= ~(page_size - 1);
248
249 end = (unsigned long)addr + size;
250 end += page_size - 1;
251 end &= ~(page_size - 1);
252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255 }
256 #endif
257
258 static void page_init(void)
259 {
260 /* NOTE: we can always suppose that qemu_host_page_size >=
261 TARGET_PAGE_SIZE */
262 #ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269 #else
270 qemu_real_host_page_size = getpagesize();
271 #endif
272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
277
278 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
279 {
280 #ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
298 } else {
299 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
302 #endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309 #else
310 FILE *f;
311
312 last_brk = (unsigned long)sbrk(0);
313
314 f = fopen("/compat/linux/proc/self/maps", "r");
315 if (f) {
316 mmap_lock();
317
318 do {
319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
333 }
334 } while (!feof(f));
335
336 fclose(f);
337 mmap_unlock();
338 }
339 #endif
340 }
341 #endif
342 }
343
344 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
345 {
346 PageDesc *pd;
347 void **lp;
348 int i;
349
350 #if defined(CONFIG_USER_ONLY)
351 /* We can't use g_malloc because it may recurse into a locked mutex. */
352 # define ALLOC(P, SIZE) \
353 do { \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
356 } while (0)
357 #else
358 # define ALLOC(P, SIZE) \
359 do { P = g_malloc0(SIZE); } while (0)
360 #endif
361
362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
364
365 /* Level 2..N-1. */
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
367 void **p = *lp;
368
369 if (p == NULL) {
370 if (!alloc) {
371 return NULL;
372 }
373 ALLOC(p, sizeof(void *) * L2_SIZE);
374 *lp = p;
375 }
376
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
378 }
379
380 pd = *lp;
381 if (pd == NULL) {
382 if (!alloc) {
383 return NULL;
384 }
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
386 *lp = pd;
387 }
388
389 #undef ALLOC
390
391 return pd + (index & (L2_SIZE - 1));
392 }
393
394 static inline PageDesc *page_find(tb_page_addr_t index)
395 {
396 return page_find_alloc(index, 0);
397 }
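/* Illustrative sketch (not part of the build): how a caller typically turns
   a guest code address into its PageDesc.  'example_page_lookup' and
   'some_addr' are made-up names. */
#if 0
static void example_page_lookup(tb_page_addr_t some_addr)
{
    PageDesc *p = page_find(some_addr >> TARGET_PAGE_BITS);

    if (p) {
        /* p->first_tb chains the TBs that intersect this guest page */
    }
}
#endif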
398
399 #if !defined(CONFIG_USER_ONLY)
400 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
401 {
402 PhysPageDesc *pd;
403 void **lp;
404 int i;
405
406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
408
409 /* Level 2..N-1. */
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
411 void **p = *lp;
412 if (p == NULL) {
413 if (!alloc) {
414 return NULL;
415 }
416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
417 }
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
419 }
420
421 pd = *lp;
422 if (pd == NULL) {
423 int i;
424 int first_index = index & ~(L2_SIZE - 1);
425
426 if (!alloc) {
427 return NULL;
428 }
429
430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
431
432 for (i = 0; i < L2_SIZE; i++) {
433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
435 }
436 }
437
438 return pd + (index & (L2_SIZE - 1));
439 }
440
441 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
442 {
443 return phys_page_find_alloc(index, 0);
444 }
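/* Illustrative note (a sketch, not part of the build): phys_offset packs the
   page-aligned RAM offset together with an io_index in its low bits, which is
   why callers elsewhere in this file test (pd & ~TARGET_PAGE_MASK) against
   IO_MEM_RAM and friends.  A made-up decode helper: */
#if 0
static void example_phys_offset_decode(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr_t io_index = pd & ~TARGET_PAGE_MASK;   /* IO_MEM_* slot */
    ram_addr_t ram_page = pd & TARGET_PAGE_MASK;    /* backing RAM offset */

    (void)io_index;
    (void)ram_page;
}
#endif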
445
446 static void tlb_protect_code(ram_addr_t ram_addr);
447 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
448 target_ulong vaddr);
449 #define mmap_lock() do { } while(0)
450 #define mmap_unlock() do { } while(0)
451 #endif
452
453 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
454
455 #if defined(CONFIG_USER_ONLY)
456 /* Currently it is not recommended to allocate big chunks of data in
457 user mode. It will change when a dedicated libc is used */
458 #define USE_STATIC_CODE_GEN_BUFFER
459 #endif
460
461 #ifdef USE_STATIC_CODE_GEN_BUFFER
462 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
463 __attribute__((aligned (CODE_GEN_ALIGN)));
464 #endif
465
466 static void code_gen_alloc(unsigned long tb_size)
467 {
468 #ifdef USE_STATIC_CODE_GEN_BUFFER
469 code_gen_buffer = static_code_gen_buffer;
470 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
471 map_exec(code_gen_buffer, code_gen_buffer_size);
472 #else
473 code_gen_buffer_size = tb_size;
474 if (code_gen_buffer_size == 0) {
475 #if defined(CONFIG_USER_ONLY)
476 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
477 #else
478 /* XXX: needs adjustments */
479 code_gen_buffer_size = (unsigned long)(ram_size / 4);
480 #endif
481 }
482 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
483 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
484 /* The code gen buffer location may have constraints depending on
485 the host cpu and OS */
486 #if defined(__linux__)
487 {
488 int flags;
489 void *start = NULL;
490
491 flags = MAP_PRIVATE | MAP_ANONYMOUS;
492 #if defined(__x86_64__)
493 flags |= MAP_32BIT;
494 /* Cannot map more than that */
495 if (code_gen_buffer_size > (800 * 1024 * 1024))
496 code_gen_buffer_size = (800 * 1024 * 1024);
497 #elif defined(__sparc_v9__)
498 // Map the buffer below 2G, so we can use direct calls and branches
499 flags |= MAP_FIXED;
500 start = (void *) 0x60000000UL;
501 if (code_gen_buffer_size > (512 * 1024 * 1024))
502 code_gen_buffer_size = (512 * 1024 * 1024);
503 #elif defined(__arm__)
504 /* Keep the buffer no bigger than 16MB to branch between blocks */
505 if (code_gen_buffer_size > 16 * 1024 * 1024)
506 code_gen_buffer_size = 16 * 1024 * 1024;
507 #elif defined(__s390x__)
508 /* Map the buffer so that we can use direct calls and branches. */
509 /* We have a +- 4GB range on the branches; leave some slop. */
510 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
511 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
512 }
513 start = (void *)0x90000000UL;
514 #endif
515 code_gen_buffer = mmap(start, code_gen_buffer_size,
516 PROT_WRITE | PROT_READ | PROT_EXEC,
517 flags, -1, 0);
518 if (code_gen_buffer == MAP_FAILED) {
519 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
520 exit(1);
521 }
522 }
523 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
524 || defined(__DragonFly__) || defined(__OpenBSD__) \
525 || defined(__NetBSD__)
526 {
527 int flags;
528 void *addr = NULL;
529 flags = MAP_PRIVATE | MAP_ANONYMOUS;
530 #if defined(__x86_64__)
531 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
532 * 0x40000000 is free */
533 flags |= MAP_FIXED;
534 addr = (void *)0x40000000;
535 /* Cannot map more than that */
536 if (code_gen_buffer_size > (800 * 1024 * 1024))
537 code_gen_buffer_size = (800 * 1024 * 1024);
538 #elif defined(__sparc_v9__)
539 // Map the buffer below 2G, so we can use direct calls and branches
540 flags |= MAP_FIXED;
541 addr = (void *) 0x60000000UL;
542 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
543 code_gen_buffer_size = (512 * 1024 * 1024);
544 }
545 #endif
546 code_gen_buffer = mmap(addr, code_gen_buffer_size,
547 PROT_WRITE | PROT_READ | PROT_EXEC,
548 flags, -1, 0);
549 if (code_gen_buffer == MAP_FAILED) {
550 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
551 exit(1);
552 }
553 }
554 #else
555 code_gen_buffer = g_malloc(code_gen_buffer_size);
556 map_exec(code_gen_buffer, code_gen_buffer_size);
557 #endif
558 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
559 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
560 code_gen_buffer_max_size = code_gen_buffer_size -
561 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
562 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
563 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
564 }
565
566 /* Must be called before using the QEMU cpus. 'tb_size' is the size
567 (in bytes) allocated to the translation buffer. Zero means default
568 size. */
569 void tcg_exec_init(unsigned long tb_size)
570 {
571 cpu_gen_init();
572 code_gen_alloc(tb_size);
573 code_gen_ptr = code_gen_buffer;
574 page_init();
575 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
576 /* There's no guest base to take into account, so go ahead and
577 initialize the prologue now. */
578 tcg_prologue_init(&tcg_ctx);
579 #endif
580 }
581
582 bool tcg_enabled(void)
583 {
584 return code_gen_buffer != NULL;
585 }
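/* Illustrative sketch (hypothetical): how an accelerator front end might
   bring TCG up and confirm it is usable.  The function name is made up. */
#if 0
static void example_tcg_setup(void)
{
    tcg_exec_init(0);           /* 0 selects the default buffer size */
    if (!tcg_enabled()) {
        /* code_gen_buffer was not allocated; TCG cannot be used */
    }
}
#endif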
586
587 void cpu_exec_init_all(void)
588 {
589 #if !defined(CONFIG_USER_ONLY)
590 memory_map_init();
591 io_mem_init();
592 #endif
593 }
594
595 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
596
597 static int cpu_common_post_load(void *opaque, int version_id)
598 {
599 CPUState *env = opaque;
600
601 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
602 version_id is increased. */
603 env->interrupt_request &= ~0x01;
604 tlb_flush(env, 1);
605
606 return 0;
607 }
608
609 static const VMStateDescription vmstate_cpu_common = {
610 .name = "cpu_common",
611 .version_id = 1,
612 .minimum_version_id = 1,
613 .minimum_version_id_old = 1,
614 .post_load = cpu_common_post_load,
615 .fields = (VMStateField []) {
616 VMSTATE_UINT32(halted, CPUState),
617 VMSTATE_UINT32(interrupt_request, CPUState),
618 VMSTATE_END_OF_LIST()
619 }
620 };
621 #endif
622
623 CPUState *qemu_get_cpu(int cpu)
624 {
625 CPUState *env = first_cpu;
626
627 while (env) {
628 if (env->cpu_index == cpu)
629 break;
630 env = env->next_cpu;
631 }
632
633 return env;
634 }
635
636 void cpu_exec_init(CPUState *env)
637 {
638 CPUState **penv;
639 int cpu_index;
640
641 #if defined(CONFIG_USER_ONLY)
642 cpu_list_lock();
643 #endif
644 env->next_cpu = NULL;
645 penv = &first_cpu;
646 cpu_index = 0;
647 while (*penv != NULL) {
648 penv = &(*penv)->next_cpu;
649 cpu_index++;
650 }
651 env->cpu_index = cpu_index;
652 env->numa_node = 0;
653 QTAILQ_INIT(&env->breakpoints);
654 QTAILQ_INIT(&env->watchpoints);
655 #ifndef CONFIG_USER_ONLY
656 env->thread_id = qemu_get_thread_id();
657 #endif
658 *penv = env;
659 #if defined(CONFIG_USER_ONLY)
660 cpu_list_unlock();
661 #endif
662 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
663 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
664 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
665 cpu_save, cpu_load, env);
666 #endif
667 }
668
669 /* Allocate a new translation block. Flush the translation buffer if
670 too many translation blocks or too much generated code. */
671 static TranslationBlock *tb_alloc(target_ulong pc)
672 {
673 TranslationBlock *tb;
674
675 if (nb_tbs >= code_gen_max_blocks ||
676 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
677 return NULL;
678 tb = &tbs[nb_tbs++];
679 tb->pc = pc;
680 tb->cflags = 0;
681 return tb;
682 }
683
684 void tb_free(TranslationBlock *tb)
685 {
686 /* In practice this is mostly used for single-use temporary TBs.
687 Ignore the hard cases and just back up if this TB happens to
688 be the last one generated. */
689 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
690 code_gen_ptr = tb->tc_ptr;
691 nb_tbs--;
692 }
693 }
694
695 static inline void invalidate_page_bitmap(PageDesc *p)
696 {
697 if (p->code_bitmap) {
698 g_free(p->code_bitmap);
699 p->code_bitmap = NULL;
700 }
701 p->code_write_count = 0;
702 }
703
704 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
705
706 static void page_flush_tb_1 (int level, void **lp)
707 {
708 int i;
709
710 if (*lp == NULL) {
711 return;
712 }
713 if (level == 0) {
714 PageDesc *pd = *lp;
715 for (i = 0; i < L2_SIZE; ++i) {
716 pd[i].first_tb = NULL;
717 invalidate_page_bitmap(pd + i);
718 }
719 } else {
720 void **pp = *lp;
721 for (i = 0; i < L2_SIZE; ++i) {
722 page_flush_tb_1 (level - 1, pp + i);
723 }
724 }
725 }
726
727 static void page_flush_tb(void)
728 {
729 int i;
730 for (i = 0; i < V_L1_SIZE; i++) {
731 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
732 }
733 }
734
735 /* flush all the translation blocks */
736 /* XXX: tb_flush is currently not thread safe */
737 void tb_flush(CPUState *env1)
738 {
739 CPUState *env;
740 #if defined(DEBUG_FLUSH)
741 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
742 (unsigned long)(code_gen_ptr - code_gen_buffer),
743 nb_tbs, nb_tbs > 0 ?
744 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
745 #endif
746 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
747 cpu_abort(env1, "Internal error: code buffer overflow\n");
748
749 nb_tbs = 0;
750
751 for(env = first_cpu; env != NULL; env = env->next_cpu) {
752 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
753 }
754
755 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
756 page_flush_tb();
757
758 code_gen_ptr = code_gen_buffer;
759 /* XXX: flush processor icache at this point if cache flush is
760 expensive */
761 tb_flush_count++;
762 }
763
764 #ifdef DEBUG_TB_CHECK
765
766 static void tb_invalidate_check(target_ulong address)
767 {
768 TranslationBlock *tb;
769 int i;
770 address &= TARGET_PAGE_MASK;
771 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
772 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
773 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
774 address >= tb->pc + tb->size)) {
775 printf("ERROR invalidate: address=" TARGET_FMT_lx
776 " PC=%08lx size=%04x\n",
777 address, (long)tb->pc, tb->size);
778 }
779 }
780 }
781 }
782
783 /* verify that all the pages have correct rights for code */
784 static void tb_page_check(void)
785 {
786 TranslationBlock *tb;
787 int i, flags1, flags2;
788
789 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
790 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
791 flags1 = page_get_flags(tb->pc);
792 flags2 = page_get_flags(tb->pc + tb->size - 1);
793 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
794 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
795 (long)tb->pc, tb->size, flags1, flags2);
796 }
797 }
798 }
799 }
800
801 #endif
802
803 /* invalidate one TB */
804 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
805 int next_offset)
806 {
807 TranslationBlock *tb1;
808 for(;;) {
809 tb1 = *ptb;
810 if (tb1 == tb) {
811 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
812 break;
813 }
814 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
815 }
816 }
817
818 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
819 {
820 TranslationBlock *tb1;
821 unsigned int n1;
822
823 for(;;) {
824 tb1 = *ptb;
825 n1 = (long)tb1 & 3;
826 tb1 = (TranslationBlock *)((long)tb1 & ~3);
827 if (tb1 == tb) {
828 *ptb = tb1->page_next[n1];
829 break;
830 }
831 ptb = &tb1->page_next[n1];
832 }
833 }
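/* Illustrative note on the encoding used above and below: pointers stored in
   page_next[], jmp_next[] and jmp_first carry a 2-bit tag in their low bits
   (the TB's page/jump slot 0 or 1, or 2 to mark the list head).  A sketch of
   the decode, with made-up names: */
#if 0
static void example_decode_tagged_tb(TranslationBlock *tagged)
{
    unsigned int n = (long)tagged & 3;                              /* tag     */
    TranslationBlock *tb = (TranslationBlock *)((long)tagged & ~3); /* pointer */

    (void)n;
    (void)tb;
}
#endif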
834
835 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
836 {
837 TranslationBlock *tb1, **ptb;
838 unsigned int n1;
839
840 ptb = &tb->jmp_next[n];
841 tb1 = *ptb;
842 if (tb1) {
843 /* find tb(n) in circular list */
844 for(;;) {
845 tb1 = *ptb;
846 n1 = (long)tb1 & 3;
847 tb1 = (TranslationBlock *)((long)tb1 & ~3);
848 if (n1 == n && tb1 == tb)
849 break;
850 if (n1 == 2) {
851 ptb = &tb1->jmp_first;
852 } else {
853 ptb = &tb1->jmp_next[n1];
854 }
855 }
856 /* now we can suppress tb(n) from the list */
857 *ptb = tb->jmp_next[n];
858
859 tb->jmp_next[n] = NULL;
860 }
861 }
862
863 /* reset the jump entry 'n' of a TB so that it is not chained to
864 another TB */
865 static inline void tb_reset_jump(TranslationBlock *tb, int n)
866 {
867 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
868 }
869
870 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
871 {
872 CPUState *env;
873 PageDesc *p;
874 unsigned int h, n1;
875 tb_page_addr_t phys_pc;
876 TranslationBlock *tb1, *tb2;
877
878 /* remove the TB from the hash list */
879 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
880 h = tb_phys_hash_func(phys_pc);
881 tb_remove(&tb_phys_hash[h], tb,
882 offsetof(TranslationBlock, phys_hash_next));
883
884 /* remove the TB from the page list */
885 if (tb->page_addr[0] != page_addr) {
886 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
887 tb_page_remove(&p->first_tb, tb);
888 invalidate_page_bitmap(p);
889 }
890 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
891 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
892 tb_page_remove(&p->first_tb, tb);
893 invalidate_page_bitmap(p);
894 }
895
896 tb_invalidated_flag = 1;
897
898 /* remove the TB from each CPU's jump cache */
899 h = tb_jmp_cache_hash_func(tb->pc);
900 for(env = first_cpu; env != NULL; env = env->next_cpu) {
901 if (env->tb_jmp_cache[h] == tb)
902 env->tb_jmp_cache[h] = NULL;
903 }
904
905 /* suppress this TB from the two jump lists */
906 tb_jmp_remove(tb, 0);
907 tb_jmp_remove(tb, 1);
908
909 /* suppress any remaining jumps to this TB */
910 tb1 = tb->jmp_first;
911 for(;;) {
912 n1 = (long)tb1 & 3;
913 if (n1 == 2)
914 break;
915 tb1 = (TranslationBlock *)((long)tb1 & ~3);
916 tb2 = tb1->jmp_next[n1];
917 tb_reset_jump(tb1, n1);
918 tb1->jmp_next[n1] = NULL;
919 tb1 = tb2;
920 }
921 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
922
923 tb_phys_invalidate_count++;
924 }
925
926 static inline void set_bits(uint8_t *tab, int start, int len)
927 {
928 int end, mask, end1;
929
930 end = start + len;
931 tab += start >> 3;
932 mask = 0xff << (start & 7);
933 if ((start & ~7) == (end & ~7)) {
934 if (start < end) {
935 mask &= ~(0xff << (end & 7));
936 *tab |= mask;
937 }
938 } else {
939 *tab++ |= mask;
940 start = (start + 8) & ~7;
941 end1 = end & ~7;
942 while (start < end1) {
943 *tab++ = 0xff;
944 start += 8;
945 }
946 if (start < end) {
947 mask = ~(0xff << (end & 7));
948 *tab |= mask;
949 }
950 }
951 }
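/* Worked example (illustration only): set_bits(tab, 3, 7) marks bit offsets
   3..9, i.e. it ORs 0xf8 into tab[0] (bits 3..7) and 0x03 into tab[1]
   (bits 8..9).  build_page_bitmap() below relies on this to record which
   bytes of a page are covered by translated code. */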
952
953 static void build_page_bitmap(PageDesc *p)
954 {
955 int n, tb_start, tb_end;
956 TranslationBlock *tb;
957
958 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
959
960 tb = p->first_tb;
961 while (tb != NULL) {
962 n = (long)tb & 3;
963 tb = (TranslationBlock *)((long)tb & ~3);
964 /* NOTE: this is subtle as a TB may span two physical pages */
965 if (n == 0) {
966 /* NOTE: tb_end may be after the end of the page, but
967 it is not a problem */
968 tb_start = tb->pc & ~TARGET_PAGE_MASK;
969 tb_end = tb_start + tb->size;
970 if (tb_end > TARGET_PAGE_SIZE)
971 tb_end = TARGET_PAGE_SIZE;
972 } else {
973 tb_start = 0;
974 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
975 }
976 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
977 tb = tb->page_next[n];
978 }
979 }
980
981 TranslationBlock *tb_gen_code(CPUState *env,
982 target_ulong pc, target_ulong cs_base,
983 int flags, int cflags)
984 {
985 TranslationBlock *tb;
986 uint8_t *tc_ptr;
987 tb_page_addr_t phys_pc, phys_page2;
988 target_ulong virt_page2;
989 int code_gen_size;
990
991 phys_pc = get_page_addr_code(env, pc);
992 tb = tb_alloc(pc);
993 if (!tb) {
994 /* flush must be done */
995 tb_flush(env);
996 /* cannot fail at this point */
997 tb = tb_alloc(pc);
998 /* Don't forget to invalidate previous TB info. */
999 tb_invalidated_flag = 1;
1000 }
1001 tc_ptr = code_gen_ptr;
1002 tb->tc_ptr = tc_ptr;
1003 tb->cs_base = cs_base;
1004 tb->flags = flags;
1005 tb->cflags = cflags;
1006 cpu_gen_code(env, tb, &code_gen_size);
1007 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1008
1009 /* check next page if needed */
1010 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1011 phys_page2 = -1;
1012 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1013 phys_page2 = get_page_addr_code(env, virt_page2);
1014 }
1015 tb_link_page(tb, phys_pc, phys_page2);
1016 return tb;
1017 }
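/* Illustrative sketch (hypothetical): how the execution loop typically
   regenerates a missing TB.  The helper name is made up; the pc/cs_base/flags
   triple comes from cpu_get_tb_cpu_state() in real callers. */
#if 0
static TranslationBlock *example_regen_tb(CPUState *env)
{
    target_ulong pc, cs_base;
    int flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    return tb_gen_code(env, pc, cs_base, flags, 0 /* default cflags */);
}
#endif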
1018
1019 /* invalidate all TBs which intersect with the target physical page
1020 starting in range [start;end[. NOTE: start and end must refer to
1021 the same physical page. 'is_cpu_write_access' should be true if called
1022 from a real cpu write access: the virtual CPU will exit the current
1023 TB if code is modified inside this TB. */
1024 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1025 int is_cpu_write_access)
1026 {
1027 TranslationBlock *tb, *tb_next, *saved_tb;
1028 CPUState *env = cpu_single_env;
1029 tb_page_addr_t tb_start, tb_end;
1030 PageDesc *p;
1031 int n;
1032 #ifdef TARGET_HAS_PRECISE_SMC
1033 int current_tb_not_found = is_cpu_write_access;
1034 TranslationBlock *current_tb = NULL;
1035 int current_tb_modified = 0;
1036 target_ulong current_pc = 0;
1037 target_ulong current_cs_base = 0;
1038 int current_flags = 0;
1039 #endif /* TARGET_HAS_PRECISE_SMC */
1040
1041 p = page_find(start >> TARGET_PAGE_BITS);
1042 if (!p)
1043 return;
1044 if (!p->code_bitmap &&
1045 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1046 is_cpu_write_access) {
1047 /* build code bitmap */
1048 build_page_bitmap(p);
1049 }
1050
1051 /* we remove all the TBs in the range [start, end[ */
1052 /* XXX: see if in some cases it could be faster to invalidate all the code */
1053 tb = p->first_tb;
1054 while (tb != NULL) {
1055 n = (long)tb & 3;
1056 tb = (TranslationBlock *)((long)tb & ~3);
1057 tb_next = tb->page_next[n];
1058 /* NOTE: this is subtle as a TB may span two physical pages */
1059 if (n == 0) {
1060 /* NOTE: tb_end may be after the end of the page, but
1061 it is not a problem */
1062 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1063 tb_end = tb_start + tb->size;
1064 } else {
1065 tb_start = tb->page_addr[1];
1066 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1067 }
1068 if (!(tb_end <= start || tb_start >= end)) {
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_not_found) {
1071 current_tb_not_found = 0;
1072 current_tb = NULL;
1073 if (env->mem_io_pc) {
1074 /* now we have a real cpu fault */
1075 current_tb = tb_find_pc(env->mem_io_pc);
1076 }
1077 }
1078 if (current_tb == tb &&
1079 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1080 /* If we are modifying the current TB, we must stop
1081 its execution. We could be more precise by checking
1082 that the modification is after the current PC, but it
1083 would require a specialized function to partially
1084 restore the CPU state */
1085
1086 current_tb_modified = 1;
1087 cpu_restore_state(current_tb, env, env->mem_io_pc);
1088 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1089 &current_flags);
1090 }
1091 #endif /* TARGET_HAS_PRECISE_SMC */
1092 /* we need to do that to handle the case where a signal
1093 occurs while doing tb_phys_invalidate() */
1094 saved_tb = NULL;
1095 if (env) {
1096 saved_tb = env->current_tb;
1097 env->current_tb = NULL;
1098 }
1099 tb_phys_invalidate(tb, -1);
1100 if (env) {
1101 env->current_tb = saved_tb;
1102 if (env->interrupt_request && env->current_tb)
1103 cpu_interrupt(env, env->interrupt_request);
1104 }
1105 }
1106 tb = tb_next;
1107 }
1108 #if !defined(CONFIG_USER_ONLY)
1109 /* if no code remaining, no need to continue to use slow writes */
1110 if (!p->first_tb) {
1111 invalidate_page_bitmap(p);
1112 if (is_cpu_write_access) {
1113 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1114 }
1115 }
1116 #endif
1117 #ifdef TARGET_HAS_PRECISE_SMC
1118 if (current_tb_modified) {
1119 /* we generate a block containing just the instruction
1120 modifying the memory. It will ensure that it cannot modify
1121 itself */
1122 env->current_tb = NULL;
1123 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1124 cpu_resume_from_signal(env, NULL);
1125 }
1126 #endif
1127 }
1128
1129 /* len must be <= 8 and start must be a multiple of len */
1130 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1131 {
1132 PageDesc *p;
1133 int offset, b;
1134 #if 0
1135 if (1) {
1136 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1137 cpu_single_env->mem_io_vaddr, len,
1138 cpu_single_env->eip,
1139 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1140 }
1141 #endif
1142 p = page_find(start >> TARGET_PAGE_BITS);
1143 if (!p)
1144 return;
1145 if (p->code_bitmap) {
1146 offset = start & ~TARGET_PAGE_MASK;
1147 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1148 if (b & ((1 << len) - 1))
1149 goto do_invalidate;
1150 } else {
1151 do_invalidate:
1152 tb_invalidate_phys_page_range(start, start + len, 1);
1153 }
1154 }
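/* Worked example (illustration only): for a 4-byte store at page offset 0x44
   the fast path above reads code_bitmap[0x44 >> 3] = code_bitmap[8], shifts
   it right by (0x44 & 7) = 4 and tests the low 4 bits, i.e. whether any of
   offsets 0x44..0x47 holds translated code; only then does it fall back to
   tb_invalidate_phys_page_range(). */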
1155
1156 #if !defined(CONFIG_SOFTMMU)
1157 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1158 unsigned long pc, void *puc)
1159 {
1160 TranslationBlock *tb;
1161 PageDesc *p;
1162 int n;
1163 #ifdef TARGET_HAS_PRECISE_SMC
1164 TranslationBlock *current_tb = NULL;
1165 CPUState *env = cpu_single_env;
1166 int current_tb_modified = 0;
1167 target_ulong current_pc = 0;
1168 target_ulong current_cs_base = 0;
1169 int current_flags = 0;
1170 #endif
1171
1172 addr &= TARGET_PAGE_MASK;
1173 p = page_find(addr >> TARGET_PAGE_BITS);
1174 if (!p)
1175 return;
1176 tb = p->first_tb;
1177 #ifdef TARGET_HAS_PRECISE_SMC
1178 if (tb && pc != 0) {
1179 current_tb = tb_find_pc(pc);
1180 }
1181 #endif
1182 while (tb != NULL) {
1183 n = (long)tb & 3;
1184 tb = (TranslationBlock *)((long)tb & ~3);
1185 #ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb == tb &&
1187 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1188 /* If we are modifying the current TB, we must stop
1189 its execution. We could be more precise by checking
1190 that the modification is after the current PC, but it
1191 would require a specialized function to partially
1192 restore the CPU state */
1193
1194 current_tb_modified = 1;
1195 cpu_restore_state(current_tb, env, pc);
1196 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1197 &current_flags);
1198 }
1199 #endif /* TARGET_HAS_PRECISE_SMC */
1200 tb_phys_invalidate(tb, addr);
1201 tb = tb->page_next[n];
1202 }
1203 p->first_tb = NULL;
1204 #ifdef TARGET_HAS_PRECISE_SMC
1205 if (current_tb_modified) {
1206 /* we generate a block containing just the instruction
1207 modifying the memory. It will ensure that it cannot modify
1208 itself */
1209 env->current_tb = NULL;
1210 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1211 cpu_resume_from_signal(env, puc);
1212 }
1213 #endif
1214 }
1215 #endif
1216
1217 /* add the tb in the target page and protect it if necessary */
1218 static inline void tb_alloc_page(TranslationBlock *tb,
1219 unsigned int n, tb_page_addr_t page_addr)
1220 {
1221 PageDesc *p;
1222 #ifndef CONFIG_USER_ONLY
1223 bool page_already_protected;
1224 #endif
1225
1226 tb->page_addr[n] = page_addr;
1227 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1228 tb->page_next[n] = p->first_tb;
1229 #ifndef CONFIG_USER_ONLY
1230 page_already_protected = p->first_tb != NULL;
1231 #endif
1232 p->first_tb = (TranslationBlock *)((long)tb | n);
1233 invalidate_page_bitmap(p);
1234
1235 #if defined(TARGET_HAS_SMC) || 1
1236
1237 #if defined(CONFIG_USER_ONLY)
1238 if (p->flags & PAGE_WRITE) {
1239 target_ulong addr;
1240 PageDesc *p2;
1241 int prot;
1242
1243 /* force the host page as non writable (writes will have a
1244 page fault + mprotect overhead) */
1245 page_addr &= qemu_host_page_mask;
1246 prot = 0;
1247 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1248 addr += TARGET_PAGE_SIZE) {
1249
1250 p2 = page_find (addr >> TARGET_PAGE_BITS);
1251 if (!p2)
1252 continue;
1253 prot |= p2->flags;
1254 p2->flags &= ~PAGE_WRITE;
1255 }
1256 mprotect(g2h(page_addr), qemu_host_page_size,
1257 (prot & PAGE_BITS) & ~PAGE_WRITE);
1258 #ifdef DEBUG_TB_INVALIDATE
1259 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1260 page_addr);
1261 #endif
1262 }
1263 #else
1264 /* if some code is already present, then the pages are already
1265 protected. So we handle the case where only the first TB is
1266 allocated in a physical page */
1267 if (!page_already_protected) {
1268 tlb_protect_code(page_addr);
1269 }
1270 #endif
1271
1272 #endif /* TARGET_HAS_SMC */
1273 }
1274
1275 /* add a new TB and link it to the physical page tables. phys_page2 is
1276 (-1) to indicate that only one page contains the TB. */
1277 void tb_link_page(TranslationBlock *tb,
1278 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1279 {
1280 unsigned int h;
1281 TranslationBlock **ptb;
1282
1283 /* Grab the mmap lock to stop another thread invalidating this TB
1284 before we are done. */
1285 mmap_lock();
1286 /* add in the physical hash table */
1287 h = tb_phys_hash_func(phys_pc);
1288 ptb = &tb_phys_hash[h];
1289 tb->phys_hash_next = *ptb;
1290 *ptb = tb;
1291
1292 /* add in the page list */
1293 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1294 if (phys_page2 != -1)
1295 tb_alloc_page(tb, 1, phys_page2);
1296 else
1297 tb->page_addr[1] = -1;
1298
1299 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1300 tb->jmp_next[0] = NULL;
1301 tb->jmp_next[1] = NULL;
1302
1303 /* init original jump addresses */
1304 if (tb->tb_next_offset[0] != 0xffff)
1305 tb_reset_jump(tb, 0);
1306 if (tb->tb_next_offset[1] != 0xffff)
1307 tb_reset_jump(tb, 1);
1308
1309 #ifdef DEBUG_TB_CHECK
1310 tb_page_check();
1311 #endif
1312 mmap_unlock();
1313 }
1314
1315 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1316 tb[1].tc_ptr. Return NULL if not found */
1317 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1318 {
1319 int m_min, m_max, m;
1320 unsigned long v;
1321 TranslationBlock *tb;
1322
1323 if (nb_tbs <= 0)
1324 return NULL;
1325 if (tc_ptr < (unsigned long)code_gen_buffer ||
1326 tc_ptr >= (unsigned long)code_gen_ptr)
1327 return NULL;
1328 /* binary search (cf Knuth) */
1329 m_min = 0;
1330 m_max = nb_tbs - 1;
1331 while (m_min <= m_max) {
1332 m = (m_min + m_max) >> 1;
1333 tb = &tbs[m];
1334 v = (unsigned long)tb->tc_ptr;
1335 if (v == tc_ptr)
1336 return tb;
1337 else if (tc_ptr < v) {
1338 m_max = m - 1;
1339 } else {
1340 m_min = m + 1;
1341 }
1342 }
1343 return &tbs[m_max];
1344 }
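/* Illustrative sketch (hypothetical): mapping a faulting host PC back to
   guest CPU state, roughly what the SMC/write-fault paths above do.  The
   helper name is made up; 'host_pc' would come from a signal handler. */
#if 0
static void example_restore_from_host_pc(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif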
1345
1346 static void tb_reset_jump_recursive(TranslationBlock *tb);
1347
1348 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1349 {
1350 TranslationBlock *tb1, *tb_next, **ptb;
1351 unsigned int n1;
1352
1353 tb1 = tb->jmp_next[n];
1354 if (tb1 != NULL) {
1355 /* find head of list */
1356 for(;;) {
1357 n1 = (long)tb1 & 3;
1358 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1359 if (n1 == 2)
1360 break;
1361 tb1 = tb1->jmp_next[n1];
1362 }
1363 /* we are now sure that tb jumps to tb1 */
1364 tb_next = tb1;
1365
1366 /* remove tb from the jmp_first list */
1367 ptb = &tb_next->jmp_first;
1368 for(;;) {
1369 tb1 = *ptb;
1370 n1 = (long)tb1 & 3;
1371 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1372 if (n1 == n && tb1 == tb)
1373 break;
1374 ptb = &tb1->jmp_next[n1];
1375 }
1376 *ptb = tb->jmp_next[n];
1377 tb->jmp_next[n] = NULL;
1378
1379 /* suppress the jump to next tb in generated code */
1380 tb_reset_jump(tb, n);
1381
1382 /* suppress jumps in the tb on which we could have jumped */
1383 tb_reset_jump_recursive(tb_next);
1384 }
1385 }
1386
1387 static void tb_reset_jump_recursive(TranslationBlock *tb)
1388 {
1389 tb_reset_jump_recursive2(tb, 0);
1390 tb_reset_jump_recursive2(tb, 1);
1391 }
1392
1393 #if defined(TARGET_HAS_ICE)
1394 #if defined(CONFIG_USER_ONLY)
1395 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1396 {
1397 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1398 }
1399 #else
1400 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1401 {
1402 target_phys_addr_t addr;
1403 target_ulong pd;
1404 ram_addr_t ram_addr;
1405 PhysPageDesc *p;
1406
1407 addr = cpu_get_phys_page_debug(env, pc);
1408 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1409 if (!p) {
1410 pd = IO_MEM_UNASSIGNED;
1411 } else {
1412 pd = p->phys_offset;
1413 }
1414 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1415 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1416 }
1417 #endif
1418 #endif /* TARGET_HAS_ICE */
1419
1420 #if defined(CONFIG_USER_ONLY)
1421 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1422
1423 {
1424 }
1425
1426 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1427 int flags, CPUWatchpoint **watchpoint)
1428 {
1429 return -ENOSYS;
1430 }
1431 #else
1432 /* Add a watchpoint. */
1433 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1434 int flags, CPUWatchpoint **watchpoint)
1435 {
1436 target_ulong len_mask = ~(len - 1);
1437 CPUWatchpoint *wp;
1438
1439 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1440 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1441 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1442 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1443 return -EINVAL;
1444 }
1445 wp = g_malloc(sizeof(*wp));
1446
1447 wp->vaddr = addr;
1448 wp->len_mask = len_mask;
1449 wp->flags = flags;
1450
1451 /* keep all GDB-injected watchpoints in front */
1452 if (flags & BP_GDB)
1453 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1454 else
1455 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1456
1457 tlb_flush_page(env, addr);
1458
1459 if (watchpoint)
1460 *watchpoint = wp;
1461 return 0;
1462 }
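/* Illustrative sketch (hypothetical): how a debugger stub might arm a 4-byte
   write watchpoint.  The helper name and the flag choice are examples only. */
#if 0
static int example_arm_watchpoint(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, guest_addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp);
}
#endif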
1463
1464 /* Remove a specific watchpoint. */
1465 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1466 int flags)
1467 {
1468 target_ulong len_mask = ~(len - 1);
1469 CPUWatchpoint *wp;
1470
1471 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1472 if (addr == wp->vaddr && len_mask == wp->len_mask
1473 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1474 cpu_watchpoint_remove_by_ref(env, wp);
1475 return 0;
1476 }
1477 }
1478 return -ENOENT;
1479 }
1480
1481 /* Remove a specific watchpoint by reference. */
1482 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1483 {
1484 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1485
1486 tlb_flush_page(env, watchpoint->vaddr);
1487
1488 g_free(watchpoint);
1489 }
1490
1491 /* Remove all matching watchpoints. */
1492 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1493 {
1494 CPUWatchpoint *wp, *next;
1495
1496 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1497 if (wp->flags & mask)
1498 cpu_watchpoint_remove_by_ref(env, wp);
1499 }
1500 }
1501 #endif
1502
1503 /* Add a breakpoint. */
1504 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1505 CPUBreakpoint **breakpoint)
1506 {
1507 #if defined(TARGET_HAS_ICE)
1508 CPUBreakpoint *bp;
1509
1510 bp = g_malloc(sizeof(*bp));
1511
1512 bp->pc = pc;
1513 bp->flags = flags;
1514
1515 /* keep all GDB-injected breakpoints in front */
1516 if (flags & BP_GDB)
1517 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1518 else
1519 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1520
1521 breakpoint_invalidate(env, pc);
1522
1523 if (breakpoint)
1524 *breakpoint = bp;
1525 return 0;
1526 #else
1527 return -ENOSYS;
1528 #endif
1529 }
1530
1531 /* Remove a specific breakpoint. */
1532 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1533 {
1534 #if defined(TARGET_HAS_ICE)
1535 CPUBreakpoint *bp;
1536
1537 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1538 if (bp->pc == pc && bp->flags == flags) {
1539 cpu_breakpoint_remove_by_ref(env, bp);
1540 return 0;
1541 }
1542 }
1543 return -ENOENT;
1544 #else
1545 return -ENOSYS;
1546 #endif
1547 }
1548
1549 /* Remove a specific breakpoint by reference. */
1550 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1551 {
1552 #if defined(TARGET_HAS_ICE)
1553 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1554
1555 breakpoint_invalidate(env, breakpoint->pc);
1556
1557 g_free(breakpoint);
1558 #endif
1559 }
1560
1561 /* Remove all matching breakpoints. */
1562 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1563 {
1564 #if defined(TARGET_HAS_ICE)
1565 CPUBreakpoint *bp, *next;
1566
1567 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1568 if (bp->flags & mask)
1569 cpu_breakpoint_remove_by_ref(env, bp);
1570 }
1571 #endif
1572 }
1573
1574 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1575 CPU loop after each instruction */
1576 void cpu_single_step(CPUState *env, int enabled)
1577 {
1578 #if defined(TARGET_HAS_ICE)
1579 if (env->singlestep_enabled != enabled) {
1580 env->singlestep_enabled = enabled;
1581 if (kvm_enabled())
1582 kvm_update_guest_debug(env, 0);
1583 else {
1584 /* must flush all the translated code to avoid inconsistencies */
1585 /* XXX: only flush what is necessary */
1586 tb_flush(env);
1587 }
1588 }
1589 #endif
1590 }
1591
1592 /* enable or disable low level logging */
1593 void cpu_set_log(int log_flags)
1594 {
1595 loglevel = log_flags;
1596 if (loglevel && !logfile) {
1597 logfile = fopen(logfilename, log_append ? "a" : "w");
1598 if (!logfile) {
1599 perror(logfilename);
1600 _exit(1);
1601 }
1602 #if !defined(CONFIG_SOFTMMU)
1603 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1604 {
1605 static char logfile_buf[4096];
1606 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1607 }
1608 #elif defined(_WIN32)
1609 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1610 setvbuf(logfile, NULL, _IONBF, 0);
1611 #else
1612 setvbuf(logfile, NULL, _IOLBF, 0);
1613 #endif
1614 log_append = 1;
1615 }
1616 if (!loglevel && logfile) {
1617 fclose(logfile);
1618 logfile = NULL;
1619 }
1620 }
1621
1622 void cpu_set_log_filename(const char *filename)
1623 {
1624 logfilename = strdup(filename);
1625 if (logfile) {
1626 fclose(logfile);
1627 logfile = NULL;
1628 }
1629 cpu_set_log(loglevel);
1630 }
1631
1632 static void cpu_unlink_tb(CPUState *env)
1633 {
1634 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1635 problem and hope the cpu will stop of its own accord. For userspace
1636 emulation this often isn't actually as bad as it sounds. Often
1637 signals are used primarily to interrupt blocking syscalls. */
1638 TranslationBlock *tb;
1639 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1640
1641 spin_lock(&interrupt_lock);
1642 tb = env->current_tb;
1643 /* if the cpu is currently executing code, we must unlink it and
1644 all the potentially executing TB */
1645 if (tb) {
1646 env->current_tb = NULL;
1647 tb_reset_jump_recursive(tb);
1648 }
1649 spin_unlock(&interrupt_lock);
1650 }
1651
1652 #ifndef CONFIG_USER_ONLY
1653 /* mask must never be zero, except for A20 change call */
1654 static void tcg_handle_interrupt(CPUState *env, int mask)
1655 {
1656 int old_mask;
1657
1658 old_mask = env->interrupt_request;
1659 env->interrupt_request |= mask;
1660
1661 /*
1662 * If called from iothread context, wake the target cpu in
1663 * case it's halted.
1664 */
1665 if (!qemu_cpu_is_self(env)) {
1666 qemu_cpu_kick(env);
1667 return;
1668 }
1669
1670 if (use_icount) {
1671 env->icount_decr.u16.high = 0xffff;
1672 if (!can_do_io(env)
1673 && (mask & ~old_mask) != 0) {
1674 cpu_abort(env, "Raised interrupt while not in I/O function");
1675 }
1676 } else {
1677 cpu_unlink_tb(env);
1678 }
1679 }
1680
1681 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1682
1683 #else /* CONFIG_USER_ONLY */
1684
1685 void cpu_interrupt(CPUState *env, int mask)
1686 {
1687 env->interrupt_request |= mask;
1688 cpu_unlink_tb(env);
1689 }
1690 #endif /* CONFIG_USER_ONLY */
1691
1692 void cpu_reset_interrupt(CPUState *env, int mask)
1693 {
1694 env->interrupt_request &= ~mask;
1695 }
1696
1697 void cpu_exit(CPUState *env)
1698 {
1699 env->exit_request = 1;
1700 cpu_unlink_tb(env);
1701 }
1702
1703 const CPULogItem cpu_log_items[] = {
1704 { CPU_LOG_TB_OUT_ASM, "out_asm",
1705 "show generated host assembly code for each compiled TB" },
1706 { CPU_LOG_TB_IN_ASM, "in_asm",
1707 "show target assembly code for each compiled TB" },
1708 { CPU_LOG_TB_OP, "op",
1709 "show micro ops for each compiled TB" },
1710 { CPU_LOG_TB_OP_OPT, "op_opt",
1711 "show micro ops "
1712 #ifdef TARGET_I386
1713 "before eflags optimization and "
1714 #endif
1715 "after liveness analysis" },
1716 { CPU_LOG_INT, "int",
1717 "show interrupts/exceptions in short format" },
1718 { CPU_LOG_EXEC, "exec",
1719 "show trace before each executed TB (lots of logs)" },
1720 { CPU_LOG_TB_CPU, "cpu",
1721 "show CPU state before block translation" },
1722 #ifdef TARGET_I386
1723 { CPU_LOG_PCALL, "pcall",
1724 "show protected mode far calls/returns/exceptions" },
1725 { CPU_LOG_RESET, "cpu_reset",
1726 "show CPU state before CPU resets" },
1727 #endif
1728 #ifdef DEBUG_IOPORT
1729 { CPU_LOG_IOPORT, "ioport",
1730 "show all i/o ports accesses" },
1731 #endif
1732 { 0, NULL, NULL },
1733 };
1734
1735 static int cmp1(const char *s1, int n, const char *s2)
1736 {
1737 if (strlen(s2) != n)
1738 return 0;
1739 return memcmp(s1, s2, n) == 0;
1740 }
1741
1742 /* takes a comma separated list of log masks. Return 0 if error. */
1743 int cpu_str_to_log_mask(const char *str)
1744 {
1745 const CPULogItem *item;
1746 int mask;
1747 const char *p, *p1;
1748
1749 p = str;
1750 mask = 0;
1751 for(;;) {
1752 p1 = strchr(p, ',');
1753 if (!p1)
1754 p1 = p + strlen(p);
1755 if(cmp1(p,p1-p,"all")) {
1756 for(item = cpu_log_items; item->mask != 0; item++) {
1757 mask |= item->mask;
1758 }
1759 } else {
1760 for(item = cpu_log_items; item->mask != 0; item++) {
1761 if (cmp1(p, p1 - p, item->name))
1762 goto found;
1763 }
1764 return 0;
1765 }
1766 found:
1767 mask |= item->mask;
1768 if (*p1 != ',')
1769 break;
1770 p = p1 + 1;
1771 }
1772 return mask;
1773 }
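/* Illustrative example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, while an unknown name such as "bogus"
   makes the whole call return 0 so the caller can reject the option. */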
1774
1775 void cpu_abort(CPUState *env, const char *fmt, ...)
1776 {
1777 va_list ap;
1778 va_list ap2;
1779
1780 va_start(ap, fmt);
1781 va_copy(ap2, ap);
1782 fprintf(stderr, "qemu: fatal: ");
1783 vfprintf(stderr, fmt, ap);
1784 fprintf(stderr, "\n");
1785 #ifdef TARGET_I386
1786 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1787 #else
1788 cpu_dump_state(env, stderr, fprintf, 0);
1789 #endif
1790 if (qemu_log_enabled()) {
1791 qemu_log("qemu: fatal: ");
1792 qemu_log_vprintf(fmt, ap2);
1793 qemu_log("\n");
1794 #ifdef TARGET_I386
1795 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1796 #else
1797 log_cpu_state(env, 0);
1798 #endif
1799 qemu_log_flush();
1800 qemu_log_close();
1801 }
1802 va_end(ap2);
1803 va_end(ap);
1804 #if defined(CONFIG_USER_ONLY)
1805 {
1806 struct sigaction act;
1807 sigfillset(&act.sa_mask);
1808 act.sa_handler = SIG_DFL;
1809 sigaction(SIGABRT, &act, NULL);
1810 }
1811 #endif
1812 abort();
1813 }
1814
1815 CPUState *cpu_copy(CPUState *env)
1816 {
1817 CPUState *new_env = cpu_init(env->cpu_model_str);
1818 CPUState *next_cpu = new_env->next_cpu;
1819 int cpu_index = new_env->cpu_index;
1820 #if defined(TARGET_HAS_ICE)
1821 CPUBreakpoint *bp;
1822 CPUWatchpoint *wp;
1823 #endif
1824
1825 memcpy(new_env, env, sizeof(CPUState));
1826
1827 /* Preserve chaining and index. */
1828 new_env->next_cpu = next_cpu;
1829 new_env->cpu_index = cpu_index;
1830
1831 /* Clone all break/watchpoints.
1832 Note: Once we support ptrace with hw-debug register access, make sure
1833 BP_CPU break/watchpoints are handled correctly on clone. */
1834 QTAILQ_INIT(&env->breakpoints);
1835 QTAILQ_INIT(&env->watchpoints);
1836 #if defined(TARGET_HAS_ICE)
1837 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1838 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1839 }
1840 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1841 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1842 wp->flags, NULL);
1843 }
1844 #endif
1845
1846 return new_env;
1847 }
1848
1849 #if !defined(CONFIG_USER_ONLY)
1850
1851 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1852 {
1853 unsigned int i;
1854
1855 /* Discard jump cache entries for any tb which might potentially
1856 overlap the flushed page. */
1857 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1858 memset (&env->tb_jmp_cache[i], 0,
1859 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1860
1861 i = tb_jmp_cache_hash_page(addr);
1862 memset (&env->tb_jmp_cache[i], 0,
1863 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1864 }
1865
1866 static CPUTLBEntry s_cputlb_empty_entry = {
1867 .addr_read = -1,
1868 .addr_write = -1,
1869 .addr_code = -1,
1870 .addend = -1,
1871 };
1872
1873 /* NOTE: if flush_global is true, also flush global entries (not
1874 implemented yet) */
1875 void tlb_flush(CPUState *env, int flush_global)
1876 {
1877 int i;
1878
1879 #if defined(DEBUG_TLB)
1880 printf("tlb_flush:\n");
1881 #endif
1882 /* must reset current TB so that interrupts cannot modify the
1883 links while we are modifying them */
1884 env->current_tb = NULL;
1885
1886 for(i = 0; i < CPU_TLB_SIZE; i++) {
1887 int mmu_idx;
1888 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1889 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1890 }
1891 }
1892
1893 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1894
1895 env->tlb_flush_addr = -1;
1896 env->tlb_flush_mask = 0;
1897 tlb_flush_count++;
1898 }
1899
1900 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1901 {
1902 if (addr == (tlb_entry->addr_read &
1903 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1904 addr == (tlb_entry->addr_write &
1905 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1906 addr == (tlb_entry->addr_code &
1907 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1908 *tlb_entry = s_cputlb_empty_entry;
1909 }
1910 }
1911
1912 void tlb_flush_page(CPUState *env, target_ulong addr)
1913 {
1914 int i;
1915 int mmu_idx;
1916
1917 #if defined(DEBUG_TLB)
1918 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1919 #endif
1920 /* Check if we need to flush due to large pages. */
1921 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1922 #if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: forced full flush ("
1924 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1925 env->tlb_flush_addr, env->tlb_flush_mask);
1926 #endif
1927 tlb_flush(env, 1);
1928 return;
1929 }
1930 /* must reset current TB so that interrupts cannot modify the
1931 links while we are modifying them */
1932 env->current_tb = NULL;
1933
1934 addr &= TARGET_PAGE_MASK;
1935 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1936 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1937 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1938
1939 tlb_flush_jmp_cache(env, addr);
1940 }
1941
1942 /* update the TLBs so that writes to code in the virtual page 'addr'
1943 can be detected */
1944 static void tlb_protect_code(ram_addr_t ram_addr)
1945 {
1946 cpu_physical_memory_reset_dirty(ram_addr,
1947 ram_addr + TARGET_PAGE_SIZE,
1948 CODE_DIRTY_FLAG);
1949 }
1950
1951 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1952 tested for self modifying code */
1953 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1954 target_ulong vaddr)
1955 {
1956 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1957 }
1958
1959 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1960 unsigned long start, unsigned long length)
1961 {
1962 unsigned long addr;
1963 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1964 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1965 if ((addr - start) < length) {
1966 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1967 }
1968 }
1969 }
1970
1971 /* Note: start and end must be within the same ram block. */
1972 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1973 int dirty_flags)
1974 {
1975 CPUState *env;
1976 unsigned long length, start1;
1977 int i;
1978
1979 start &= TARGET_PAGE_MASK;
1980 end = TARGET_PAGE_ALIGN(end);
1981
1982 length = end - start;
1983 if (length == 0)
1984 return;
1985 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1986
1987 /* we modify the TLB cache so that the dirty bit will be set again
1988 when accessing the range */
1989 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1990 /* Check that we don't span multiple blocks - this breaks the
1991 address comparisons below. */
1992 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1993 != (end - 1) - start) {
1994 abort();
1995 }
1996
1997 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1998 int mmu_idx;
1999 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2000 for(i = 0; i < CPU_TLB_SIZE; i++)
2001 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2002 start1, length);
2003 }
2004 }
2005 }
2006
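/* Dirty logging proper is handled by the memory API; here we only record
   whether migration (and hence dirty tracking) is enabled. */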
2007 int cpu_physical_memory_set_dirty_tracking(int enable)
2008 {
2009 int ret = 0;
2010 in_migration = enable;
2011 return ret;
2012 }
2013
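/* Re-arm the TLB_NOTDIRTY slow path for a RAM entry whose page is no
   longer marked dirty. */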
2014 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2015 {
2016 ram_addr_t ram_addr;
2017 void *p;
2018
2019 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2020 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2021 + tlb_entry->addend);
2022 ram_addr = qemu_ram_addr_from_host_nofail(p);
2023 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2024 tlb_entry->addr_write |= TLB_NOTDIRTY;
2025 }
2026 }
2027 }
2028
2029 /* update the TLB according to the current state of the dirty bits */
2030 void cpu_tlb_update_dirty(CPUState *env)
2031 {
2032 int i;
2033 int mmu_idx;
2034 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2035 for(i = 0; i < CPU_TLB_SIZE; i++)
2036 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2037 }
2038 }
2039
2040 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2041 {
2042 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2043 tlb_entry->addr_write = vaddr;
2044 }
2045
2046 /* update the TLB corresponding to virtual page vaddr
2047 so that it is no longer dirty */
2048 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2049 {
2050 int i;
2051 int mmu_idx;
2052
2053 vaddr &= TARGET_PAGE_MASK;
2054 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2055 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2056 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2057 }
2058
2059 /* Our TLB does not support large pages, so remember the area covered by
2060 large pages and trigger a full TLB flush if these are invalidated. */
2061 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2062 target_ulong size)
2063 {
2064 target_ulong mask = ~(size - 1);
2065
2066 if (env->tlb_flush_addr == (target_ulong)-1) {
2067 env->tlb_flush_addr = vaddr & mask;
2068 env->tlb_flush_mask = mask;
2069 return;
2070 }
2071 /* Extend the existing region to include the new page.
2072 This is a compromise between unnecessary flushes and the cost
2073 of maintaining a full variable size TLB. */
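/* For example (illustrative, 32-bit target): merging a 2 MB page at
   0x00200000 with an existing 2 MB region at 0x00600000 widens the mask
   from 0xffe00000 to 0xff800000, so the tracked region becomes
   0x00000000-0x007fffff. */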
2074 mask &= env->tlb_flush_mask;
2075 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2076 mask <<= 1;
2077 }
2078 env->tlb_flush_addr &= mask;
2079 env->tlb_flush_mask = mask;
2080 }
2081
2082 /* Add a new TLB entry. At most one entry for a given virtual address
2083 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2084 supplied size is only used by tlb_flush_page. */
2085 void tlb_set_page(CPUState *env, target_ulong vaddr,
2086 target_phys_addr_t paddr, int prot,
2087 int mmu_idx, target_ulong size)
2088 {
2089 PhysPageDesc *p;
2090 unsigned long pd;
2091 unsigned int index;
2092 target_ulong address;
2093 target_ulong code_address;
2094 unsigned long addend;
2095 CPUTLBEntry *te;
2096 CPUWatchpoint *wp;
2097 target_phys_addr_t iotlb;
2098
2099 assert(size >= TARGET_PAGE_SIZE);
2100 if (size != TARGET_PAGE_SIZE) {
2101 tlb_add_large_page(env, vaddr, size);
2102 }
2103 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2104 if (!p) {
2105 pd = IO_MEM_UNASSIGNED;
2106 } else {
2107 pd = p->phys_offset;
2108 }
2109 #if defined(DEBUG_TLB)
2110 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2111 " prot=%x idx=%d pd=0x%08lx\n",
2112 vaddr, paddr, prot, mmu_idx, pd);
2113 #endif
2114
2115 address = vaddr;
2116 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2117 /* IO memory case (romd handled later) */
2118 address |= TLB_MMIO;
2119 }
2120 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2121 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2122 /* Normal RAM. */
2123 iotlb = pd & TARGET_PAGE_MASK;
2124 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2125 iotlb |= IO_MEM_NOTDIRTY;
2126 else
2127 iotlb |= IO_MEM_ROM;
2128 } else {
2129 /* IO handlers are currently passed a physical address.
2130 It would be nice to pass an offset from the base address
2131 of that region. This would avoid having to special case RAM,
2132 and avoid full address decoding in every device.
2133 We can't use the high bits of pd for this because
2134 IO_MEM_ROMD uses these as a ram address. */
2135 iotlb = (pd & ~TARGET_PAGE_MASK);
2136 if (p) {
2137 iotlb += p->region_offset;
2138 } else {
2139 iotlb += paddr;
2140 }
2141 }
2142
2143 code_address = address;
2144 /* Make accesses to pages with watchpoints go via the
2145 watchpoint trap routines. */
2146 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2147 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2148 /* Avoid trapping reads of pages with a write breakpoint. */
2149 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2150 iotlb = io_mem_watch + paddr;
2151 address |= TLB_MMIO;
2152 break;
2153 }
2154 }
2155 }
2156
2157 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2158 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2159 te = &env->tlb_table[mmu_idx][index];
2160 te->addend = addend - vaddr;
2161 if (prot & PAGE_READ) {
2162 te->addr_read = address;
2163 } else {
2164 te->addr_read = -1;
2165 }
2166
2167 if (prot & PAGE_EXEC) {
2168 te->addr_code = code_address;
2169 } else {
2170 te->addr_code = -1;
2171 }
2172 if (prot & PAGE_WRITE) {
2173 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2174 (pd & IO_MEM_ROMD)) {
2175 /* Write access calls the I/O callback. */
2176 te->addr_write = address | TLB_MMIO;
2177 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2178 !cpu_physical_memory_is_dirty(pd)) {
2179 te->addr_write = address | TLB_NOTDIRTY;
2180 } else {
2181 te->addr_write = address;
2182 }
2183 } else {
2184 te->addr_write = -1;
2185 }
2186 }
2187
2188 #else
2189
2190 void tlb_flush(CPUState *env, int flush_global)
2191 {
2192 }
2193
2194 void tlb_flush_page(CPUState *env, target_ulong addr)
2195 {
2196 }
2197
2198 /*
2199 * Walks guest process memory "regions" one by one
2200 * and calls callback function 'fn' for each region.
2201 */
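/* page_dump() below is a typical caller: it passes dump_region as 'fn'
   and a FILE * as the opaque 'priv' pointer. */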
2202
2203 struct walk_memory_regions_data
2204 {
2205 walk_memory_regions_fn fn;
2206 void *priv;
2207 unsigned long start;
2208 int prot;
2209 };
2210
2211 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2212 abi_ulong end, int new_prot)
2213 {
2214 if (data->start != -1ul) {
2215 int rc = data->fn(data->priv, data->start, end, data->prot);
2216 if (rc != 0) {
2217 return rc;
2218 }
2219 }
2220
2221 data->start = (new_prot ? end : -1ul);
2222 data->prot = new_prot;
2223
2224 return 0;
2225 }
2226
2227 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2228 abi_ulong base, int level, void **lp)
2229 {
2230 abi_ulong pa;
2231 int i, rc;
2232
2233 if (*lp == NULL) {
2234 return walk_memory_regions_end(data, base, 0);
2235 }
2236
2237 if (level == 0) {
2238 PageDesc *pd = *lp;
2239 for (i = 0; i < L2_SIZE; ++i) {
2240 int prot = pd[i].flags;
2241
2242 pa = base | (i << TARGET_PAGE_BITS);
2243 if (prot != data->prot) {
2244 rc = walk_memory_regions_end(data, pa, prot);
2245 if (rc != 0) {
2246 return rc;
2247 }
2248 }
2249 }
2250 } else {
2251 void **pp = *lp;
2252 for (i = 0; i < L2_SIZE; ++i) {
2253 pa = base | ((abi_ulong)i <<
2254 (TARGET_PAGE_BITS + L2_BITS * level));
2255 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2256 if (rc != 0) {
2257 return rc;
2258 }
2259 }
2260 }
2261
2262 return 0;
2263 }
2264
2265 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2266 {
2267 struct walk_memory_regions_data data;
2268 unsigned long i;
2269
2270 data.fn = fn;
2271 data.priv = priv;
2272 data.start = -1ul;
2273 data.prot = 0;
2274
2275 for (i = 0; i < V_L1_SIZE; i++) {
2276 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2277 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2278 if (rc != 0) {
2279 return rc;
2280 }
2281 }
2282
2283 return walk_memory_regions_end(&data, 0, 0);
2284 }
2285
2286 static int dump_region(void *priv, abi_ulong start,
2287 abi_ulong end, unsigned long prot)
2288 {
2289 FILE *f = (FILE *)priv;
2290
2291 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2292 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2293 start, end, end - start,
2294 ((prot & PAGE_READ) ? 'r' : '-'),
2295 ((prot & PAGE_WRITE) ? 'w' : '-'),
2296 ((prot & PAGE_EXEC) ? 'x' : '-'));
2297
2298 return (0);
2299 }
2300
2301 /* dump memory mappings */
2302 void page_dump(FILE *f)
2303 {
2304 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2305 "start", "end", "size", "prot");
2306 walk_memory_regions(f, dump_region);
2307 }
2308
2309 int page_get_flags(target_ulong address)
2310 {
2311 PageDesc *p;
2312
2313 p = page_find(address >> TARGET_PAGE_BITS);
2314 if (!p)
2315 return 0;
2316 return p->flags;
2317 }
2318
2319 /* Modify the flags of a page and invalidate the code if necessary.
2320 The flag PAGE_WRITE_ORG is positioned automatically depending
2321 on PAGE_WRITE. The mmap_lock should already be held. */
2322 void page_set_flags(target_ulong start, target_ulong end, int flags)
2323 {
2324 target_ulong addr, len;
2325
2326 /* This function should never be called with addresses outside the
2327 guest address space. If this assert fires, it probably indicates
2328 a missing call to h2g_valid. */
2329 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2330 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2331 #endif
2332 assert(start < end);
2333
2334 start = start & TARGET_PAGE_MASK;
2335 end = TARGET_PAGE_ALIGN(end);
2336
2337 if (flags & PAGE_WRITE) {
2338 flags |= PAGE_WRITE_ORG;
2339 }
2340
2341 for (addr = start, len = end - start;
2342 len != 0;
2343 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2344 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2345
2346 /* If the write protection bit is set, then we invalidate
2347 the code inside. */
2348 if (!(p->flags & PAGE_WRITE) &&
2349 (flags & PAGE_WRITE) &&
2350 p->first_tb) {
2351 tb_invalidate_phys_page(addr, 0, NULL);
2352 }
2353 p->flags = flags;
2354 }
2355 }
2356
2357 int page_check_range(target_ulong start, target_ulong len, int flags)
2358 {
2359 PageDesc *p;
2360 target_ulong end;
2361 target_ulong addr;
2362
2363 /* This function should never be called with addresses outside the
2364 guest address space. If this assert fires, it probably indicates
2365 a missing call to h2g_valid. */
2366 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2367 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2368 #endif
2369
2370 if (len == 0) {
2371 return 0;
2372 }
2373 if (start + len - 1 < start) {
2374 /* We've wrapped around. */
2375 return -1;
2376 }
2377
2378 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2379 start = start & TARGET_PAGE_MASK;
2380
2381 for (addr = start, len = end - start;
2382 len != 0;
2383 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2384 p = page_find(addr >> TARGET_PAGE_BITS);
2385 if (!p)
2386 return -1;
2387 if (!(p->flags & PAGE_VALID))
2388 return -1;
2389
2390 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2391 return -1;
2392 if (flags & PAGE_WRITE) {
2393 if (!(p->flags & PAGE_WRITE_ORG))
2394 return -1;
2395 /* unprotect the page if it was put read-only because it
2396 contains translated code */
2397 if (!(p->flags & PAGE_WRITE)) {
2398 if (!page_unprotect(addr, 0, NULL))
2399 return -1;
2400 }
2401 return 0;
2402 }
2403 }
2404 return 0;
2405 }
2406
2407 /* called from signal handler: invalidate the code and unprotect the
2408 page. Return TRUE if the fault was successfully handled. */
2409 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2410 {
2411 unsigned int prot;
2412 PageDesc *p;
2413 target_ulong host_start, host_end, addr;
2414
2415 /* Technically this isn't safe inside a signal handler. However we
2416 know this only ever happens in a synchronous SEGV handler, so in
2417 practice it seems to be ok. */
2418 mmap_lock();
2419
2420 p = page_find(address >> TARGET_PAGE_BITS);
2421 if (!p) {
2422 mmap_unlock();
2423 return 0;
2424 }
2425
2426 /* if the page was really writable, then we change its
2427 protection back to writable */
2428 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2429 host_start = address & qemu_host_page_mask;
2430 host_end = host_start + qemu_host_page_size;
2431
2432 prot = 0;
2433 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2434 p = page_find(addr >> TARGET_PAGE_BITS);
2435 p->flags |= PAGE_WRITE;
2436 prot |= p->flags;
2437
2438 /* and since the content will be modified, we must invalidate
2439 the corresponding translated code. */
2440 tb_invalidate_phys_page(addr, pc, puc);
2441 #ifdef DEBUG_TB_CHECK
2442 tb_invalidate_check(addr);
2443 #endif
2444 }
2445 mprotect((void *)g2h(host_start), qemu_host_page_size,
2446 prot & PAGE_BITS);
2447
2448 mmap_unlock();
2449 return 1;
2450 }
2451 mmap_unlock();
2452 return 0;
2453 }
2454
2455 static inline void tlb_set_dirty(CPUState *env,
2456 unsigned long addr, target_ulong vaddr)
2457 {
2458 }
2459 #endif /* defined(CONFIG_USER_ONLY) */
2460
2461 #if !defined(CONFIG_USER_ONLY)
2462
2463 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2464 typedef struct subpage_t {
2465 target_phys_addr_t base;
2466 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2467 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2468 } subpage_t;
2469
2470 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2471 ram_addr_t memory, ram_addr_t region_offset);
2472 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2473 ram_addr_t orig_memory,
2474 ram_addr_t region_offset);
2475 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2476 need_subpage) \
2477 do { \
2478 if (addr > start_addr) \
2479 start_addr2 = 0; \
2480 else { \
2481 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2482 if (start_addr2 > 0) \
2483 need_subpage = 1; \
2484 } \
2485 \
2486 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2487 end_addr2 = TARGET_PAGE_SIZE - 1; \
2488 else { \
2489 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2490 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2491 need_subpage = 1; \
2492 } \
2493 } while (0)
2494
2495 /* register physical memory.
2496 For RAM, 'size' must be a multiple of the target page size.
2497 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2498 io memory page. The address used when calling the IO function is
2499 the offset from the start of the region, plus region_offset. Both
2500 start_addr and region_offset are rounded down to a page boundary
2501 before calculating this offset. This should not be a problem unless
2502 the low bits of start_addr and region_offset differ. */
2503 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2504 ram_addr_t size,
2505 ram_addr_t phys_offset,
2506 ram_addr_t region_offset,
2507 bool log_dirty)
2508 {
2509 target_phys_addr_t addr, end_addr;
2510 PhysPageDesc *p;
2511 CPUState *env;
2512 ram_addr_t orig_size = size;
2513 subpage_t *subpage;
2514
2515 assert(size);
2516
2517 if (phys_offset == IO_MEM_UNASSIGNED) {
2518 region_offset = start_addr;
2519 }
2520 region_offset &= TARGET_PAGE_MASK;
2521 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2522 end_addr = start_addr + (target_phys_addr_t)size;
2523
2524 addr = start_addr;
2525 do {
2526 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2527 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2528 ram_addr_t orig_memory = p->phys_offset;
2529 target_phys_addr_t start_addr2, end_addr2;
2530 int need_subpage = 0;
2531
2532 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2533 need_subpage);
2534 if (need_subpage) {
2535 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2536 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2537 &p->phys_offset, orig_memory,
2538 p->region_offset);
2539 } else {
2540 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2541 >> IO_MEM_SHIFT];
2542 }
2543 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2544 region_offset);
2545 p->region_offset = 0;
2546 } else {
2547 p->phys_offset = phys_offset;
2548 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2549 (phys_offset & IO_MEM_ROMD))
2550 phys_offset += TARGET_PAGE_SIZE;
2551 }
2552 } else {
2553 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2554 p->phys_offset = phys_offset;
2555 p->region_offset = region_offset;
2556 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2557 (phys_offset & IO_MEM_ROMD)) {
2558 phys_offset += TARGET_PAGE_SIZE;
2559 } else {
2560 target_phys_addr_t start_addr2, end_addr2;
2561 int need_subpage = 0;
2562
2563 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2564 end_addr2, need_subpage);
2565
2566 if (need_subpage) {
2567 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2568 &p->phys_offset, IO_MEM_UNASSIGNED,
2569 addr & TARGET_PAGE_MASK);
2570 subpage_register(subpage, start_addr2, end_addr2,
2571 phys_offset, region_offset);
2572 p->region_offset = 0;
2573 }
2574 }
2575 }
2576 region_offset += TARGET_PAGE_SIZE;
2577 addr += TARGET_PAGE_SIZE;
2578 } while (addr != end_addr);
2579
2580 /* since each CPU stores ram addresses in its TLB cache, we must
2581 reset the modified entries */
2582 /* XXX: slow ! */
2583 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2584 tlb_flush(env, 1);
2585 }
2586 }
2587
2588 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2589 {
2590 if (kvm_enabled())
2591 kvm_coalesce_mmio_region(addr, size);
2592 }
2593
2594 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2595 {
2596 if (kvm_enabled())
2597 kvm_uncoalesce_mmio_region(addr, size);
2598 }
2599
2600 void qemu_flush_coalesced_mmio_buffer(void)
2601 {
2602 if (kvm_enabled())
2603 kvm_flush_coalesced_mmio_buffer();
2604 }
2605
2606 #if defined(__linux__) && !defined(TARGET_S390X)
2607
2608 #include <sys/vfs.h>
2609
2610 #define HUGETLBFS_MAGIC 0x958458f6
2611
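/* Return the huge page size of the hugetlbfs mount containing 'path'
   (its statfs block size), or 0 on error. A warning is printed if the
   path is not on hugetlbfs. */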
2612 static long gethugepagesize(const char *path)
2613 {
2614 struct statfs fs;
2615 int ret;
2616
2617 do {
2618 ret = statfs(path, &fs);
2619 } while (ret != 0 && errno == EINTR);
2620
2621 if (ret != 0) {
2622 perror(path);
2623 return 0;
2624 }
2625
2626 if (fs.f_type != HUGETLBFS_MAGIC)
2627 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2628
2629 return fs.f_bsize;
2630 }
2631
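/* Back a RAM block with an unlinked temporary file on a hugetlbfs mount
   and return the mmap'ed area, or NULL if huge pages cannot be used. */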
2632 static void *file_ram_alloc(RAMBlock *block,
2633 ram_addr_t memory,
2634 const char *path)
2635 {
2636 char *filename;
2637 void *area;
2638 int fd;
2639 #ifdef MAP_POPULATE
2640 int flags;
2641 #endif
2642 unsigned long hpagesize;
2643
2644 hpagesize = gethugepagesize(path);
2645 if (!hpagesize) {
2646 return NULL;
2647 }
2648
2649 if (memory < hpagesize) {
2650 return NULL;
2651 }
2652
2653 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2654 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2655 return NULL;
2656 }
2657
2658 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2659 return NULL;
2660 }
2661
2662 fd = mkstemp(filename);
2663 if (fd < 0) {
2664 perror("unable to create backing store for hugepages");
2665 free(filename);
2666 return NULL;
2667 }
2668 unlink(filename);
2669 free(filename);
2670
2671 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2672
2673 /*
2674 * ftruncate is not supported by hugetlbfs in older
2675 * hosts, so don't bother bailing out on errors.
2676 * If anything goes wrong with it under other filesystems,
2677 * mmap will fail.
2678 */
2679 if (ftruncate(fd, memory))
2680 perror("ftruncate");
2681
2682 #ifdef MAP_POPULATE
2683 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2684 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2685 * to sidestep this quirk.
2686 */
2687 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2688 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2689 #else
2690 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2691 #endif
2692 if (area == MAP_FAILED) {
2693 perror("file_ram_alloc: can't mmap RAM pages");
2694 close(fd);
2695 return (NULL);
2696 }
2697 block->fd = fd;
2698 return area;
2699 }
2700 #endif
2701
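/* Best-fit search for a gap in the ram_addr_t space that is large enough
   for 'size'; aborts if no suitable gap exists. */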
2702 static ram_addr_t find_ram_offset(ram_addr_t size)
2703 {
2704 RAMBlock *block, *next_block;
2705 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2706
2707 if (QLIST_EMPTY(&ram_list.blocks))
2708 return 0;
2709
2710 QLIST_FOREACH(block, &ram_list.blocks, next) {
2711 ram_addr_t end, next = RAM_ADDR_MAX;
2712
2713 end = block->offset + block->length;
2714
2715 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2716 if (next_block->offset >= end) {
2717 next = MIN(next, next_block->offset);
2718 }
2719 }
2720 if (next - end >= size && next - end < mingap) {
2721 offset = end;
2722 mingap = next - end;
2723 }
2724 }
2725
2726 if (offset == RAM_ADDR_MAX) {
2727 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2728 (uint64_t)size);
2729 abort();
2730 }
2731
2732 return offset;
2733 }
2734
2735 static ram_addr_t last_ram_offset(void)
2736 {
2737 RAMBlock *block;
2738 ram_addr_t last = 0;
2739
2740 QLIST_FOREACH(block, &ram_list.blocks, next)
2741 last = MAX(last, block->offset + block->length);
2742
2743 return last;
2744 }
2745
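/* Build a unique id string for the RAM block at 'addr' from the owning
   device's bus path (if available) and 'name'; duplicate ids abort. */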
2746 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2747 {
2748 RAMBlock *new_block, *block;
2749
2750 new_block = NULL;
2751 QLIST_FOREACH(block, &ram_list.blocks, next) {
2752 if (block->offset == addr) {
2753 new_block = block;
2754 break;
2755 }
2756 }
2757 assert(new_block);
2758 assert(!new_block->idstr[0]);
2759
2760 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2761 char *id = dev->parent_bus->info->get_dev_path(dev);
2762 if (id) {
2763 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2764 g_free(id);
2765 }
2766 }
2767 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2768
2769 QLIST_FOREACH(block, &ram_list.blocks, next) {
2770 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2771 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2772 new_block->idstr);
2773 abort();
2774 }
2775 }
2776 }
2777
2778 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2779 MemoryRegion *mr)
2780 {
2781 RAMBlock *new_block;
2782
2783 size = TARGET_PAGE_ALIGN(size);
2784 new_block = g_malloc0(sizeof(*new_block));
2785
2786 new_block->mr = mr;
2787 new_block->offset = find_ram_offset(size);
2788 if (host) {
2789 new_block->host = host;
2790 new_block->flags |= RAM_PREALLOC_MASK;
2791 } else {
2792 if (mem_path) {
2793 #if defined (__linux__) && !defined(TARGET_S390X)
2794 new_block->host = file_ram_alloc(new_block, size, mem_path);
2795 if (!new_block->host) {
2796 new_block->host = qemu_vmalloc(size);
2797 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2798 }
2799 #else
2800 fprintf(stderr, "-mem-path option unsupported\n");
2801 exit(1);
2802 #endif
2803 } else {
2804 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2805 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2806 a system-defined value, which is at least 256GB. Larger systems
2807 have larger values. We put the guest between the end of the data
2808 segment (system break) and this value. We use 32GB as a base to
2809 have enough room for the system break to grow. */
2810 new_block->host = mmap((void*)0x800000000, size,
2811 PROT_EXEC|PROT_READ|PROT_WRITE,
2812 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2813 if (new_block->host == MAP_FAILED) {
2814 fprintf(stderr, "Allocating RAM failed\n");
2815 abort();
2816 }
2817 #else
2818 if (xen_enabled()) {
2819 xen_ram_alloc(new_block->offset, size, mr);
2820 } else {
2821 new_block->host = qemu_vmalloc(size);
2822 }
2823 #endif
2824 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2825 }
2826 }
2827 new_block->length = size;
2828
2829 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2830
2831 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2832 last_ram_offset() >> TARGET_PAGE_BITS);
2833 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2834 0xff, size >> TARGET_PAGE_BITS);
2835
2836 if (kvm_enabled())
2837 kvm_setup_guest_memory(new_block->host, size);
2838
2839 return new_block->offset;
2840 }
2841
2842 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2843 {
2844 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2845 }
2846
2847 void qemu_ram_free_from_ptr(ram_addr_t addr)
2848 {
2849 RAMBlock *block;
2850
2851 QLIST_FOREACH(block, &ram_list.blocks, next) {
2852 if (addr == block->offset) {
2853 QLIST_REMOVE(block, next);
2854 g_free(block);
2855 return;
2856 }
2857 }
2858 }
2859
2860 void qemu_ram_free(ram_addr_t addr)
2861 {
2862 RAMBlock *block;
2863
2864 QLIST_FOREACH(block, &ram_list.blocks, next) {
2865 if (addr == block->offset) {
2866 QLIST_REMOVE(block, next);
2867 if (block->flags & RAM_PREALLOC_MASK) {
2868 ;
2869 } else if (mem_path) {
2870 #if defined (__linux__) && !defined(TARGET_S390X)
2871 if (block->fd) {
2872 munmap(block->host, block->length);
2873 close(block->fd);
2874 } else {
2875 qemu_vfree(block->host);
2876 }
2877 #else
2878 abort();
2879 #endif
2880 } else {
2881 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2882 munmap(block->host, block->length);
2883 #else
2884 if (xen_enabled()) {
2885 xen_invalidate_map_cache_entry(block->host);
2886 } else {
2887 qemu_vfree(block->host);
2888 }
2889 #endif
2890 }
2891 g_free(block);
2892 return;
2893 }
2894 }
2895
2896 }
2897
2898 #ifndef _WIN32
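/* Throw away the current mapping of [addr, addr+length) inside its RAM
   block and replace it with freshly mapped pages of the same kind
   (hugetlbfs-backed file, anonymous private or shared); the previous
   contents are discarded. */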
2899 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2900 {
2901 RAMBlock *block;
2902 ram_addr_t offset;
2903 int flags;
2904 void *area, *vaddr;
2905
2906 QLIST_FOREACH(block, &ram_list.blocks, next) {
2907 offset = addr - block->offset;
2908 if (offset < block->length) {
2909 vaddr = block->host + offset;
2910 if (block->flags & RAM_PREALLOC_MASK) {
2911 ;
2912 } else {
2913 flags = MAP_FIXED;
2914 munmap(vaddr, length);
2915 if (mem_path) {
2916 #if defined(__linux__) && !defined(TARGET_S390X)
2917 if (block->fd) {
2918 #ifdef MAP_POPULATE
2919 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2920 MAP_PRIVATE;
2921 #else
2922 flags |= MAP_PRIVATE;
2923 #endif
2924 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2925 flags, block->fd, offset);
2926 } else {
2927 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2928 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2929 flags, -1, 0);
2930 }
2931 #else
2932 abort();
2933 #endif
2934 } else {
2935 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2936 flags |= MAP_SHARED | MAP_ANONYMOUS;
2937 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2938 flags, -1, 0);
2939 #else
2940 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2941 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2942 flags, -1, 0);
2943 #endif
2944 }
2945 if (area != vaddr) {
2946 fprintf(stderr, "Could not remap addr: "
2947 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2948 length, addr);
2949 exit(1);
2950 }
2951 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2952 }
2953 return;
2954 }
2955 }
2956 }
2957 #endif /* !_WIN32 */
2958
2959 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2960 With the exception of the softmmu code in this file, this should
2961 only be used for local memory (e.g. video ram) that the device owns,
2962 and knows it isn't going to access beyond the end of the block.
2963
2964 It should not be used for general purpose DMA.
2965 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2966 */
2967 void *qemu_get_ram_ptr(ram_addr_t addr)
2968 {
2969 RAMBlock *block;
2970
2971 QLIST_FOREACH(block, &ram_list.blocks, next) {
2972 if (addr - block->offset < block->length) {
2973 /* Move this entry to the start of the list. */
2974 if (block != QLIST_FIRST(&ram_list.blocks)) {
2975 QLIST_REMOVE(block, next);
2976 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2977 }
2978 if (xen_enabled()) {
2979 /* We need to check if the requested address is in the RAM
2980 * because we don't want to map the entire memory in QEMU.
2981 * In that case just map until the end of the page.
2982 */
2983 if (block->offset == 0) {
2984 return xen_map_cache(addr, 0, 0);
2985 } else if (block->host == NULL) {
2986 block->host =
2987 xen_map_cache(block->offset, block->length, 1);
2988 }
2989 }
2990 return block->host + (addr - block->offset);
2991 }
2992 }
2993
2994 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2995 abort();
2996
2997 return NULL;
2998 }
2999
3000 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3001 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3002 */
3003 void *qemu_safe_ram_ptr(ram_addr_t addr)
3004 {
3005 RAMBlock *block;
3006
3007 QLIST_FOREACH(block, &ram_list.blocks, next) {
3008 if (addr - block->offset < block->length) {
3009 if (xen_enabled()) {
3010 /* We need to check if the requested address is in the RAM
3011 * because we don't want to map the entire memory in QEMU.
3012 * In that case just map until the end of the page.
3013 */
3014 if (block->offset == 0) {
3015 return xen_map_cache(addr, 0, 0);
3016 } else if (block->host == NULL) {
3017 block->host =
3018 xen_map_cache(block->offset, block->length, 1);
3019 }
3020 }
3021 return block->host + (addr - block->offset);
3022 }
3023 }
3024
3025 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3026 abort();
3027
3028 return NULL;
3029 }
3030
3031 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3032 * but takes a size argument */
3033 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3034 {
3035 if (*size == 0) {
3036 return NULL;
3037 }
3038 if (xen_enabled()) {
3039 return xen_map_cache(addr, *size, 1);
3040 } else {
3041 RAMBlock *block;
3042
3043 QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 if (addr - block->offset < block->length) {
3045 if (addr - block->offset + *size > block->length)
3046 *size = block->length - addr + block->offset;
3047 return block->host + (addr - block->offset);
3048 }
3049 }
3050
3051 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3052 abort();
3053 }
3054 }
3055
3056 void qemu_put_ram_ptr(void *addr)
3057 {
3058 trace_qemu_put_ram_ptr(addr);
3059 }
3060
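/* Translate a host pointer back into a ram_addr_t by scanning the RAM
   block list. Returns 0 on success, -1 if the pointer does not belong to
   any block. */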
3061 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3062 {
3063 RAMBlock *block;
3064 uint8_t *host = ptr;
3065
3066 if (xen_enabled()) {
3067 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3068 return 0;
3069 }
3070
3071 QLIST_FOREACH(block, &ram_list.blocks, next) {
3072 /* This case happens when the block is not mapped. */
3073 if (block->host == NULL) {
3074 continue;
3075 }
3076 if (host - block->host < block->length) {
3077 *ram_addr = block->offset + (host - block->host);
3078 return 0;
3079 }
3080 }
3081
3082 return -1;
3083 }
3084
3085 /* Some of the softmmu routines need to translate from a host pointer
3086 (typically a TLB entry) back to a ram offset. */
3087 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3088 {
3089 ram_addr_t ram_addr;
3090
3091 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3092 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3093 abort();
3094 }
3095 return ram_addr;
3096 }
3097
3098 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3099 {
3100 #ifdef DEBUG_UNASSIGNED
3101 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3102 #endif
3103 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3104 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
3105 #endif
3106 return 0;
3107 }
3108
3109 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3110 {
3111 #ifdef DEBUG_UNASSIGNED
3112 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3113 #endif
3114 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3115 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
3116 #endif
3117 return 0;
3118 }
3119
3120 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3121 {
3122 #ifdef DEBUG_UNASSIGNED
3123 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3124 #endif
3125 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3126 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
3127 #endif
3128 return 0;
3129 }
3130
3131 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3132 {
3133 #ifdef DEBUG_UNASSIGNED
3134 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3135 #endif
3136 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3137 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
3138 #endif
3139 }
3140
3141 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3142 {
3143 #ifdef DEBUG_UNASSIGNED
3144 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3145 #endif
3146 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3147 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
3148 #endif
3149 }
3150
3151 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3152 {
3153 #ifdef DEBUG_UNASSIGNED
3154 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3155 #endif
3156 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3157 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
3158 #endif
3159 }
3160
3161 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3162 unassigned_mem_readb,
3163 unassigned_mem_readw,
3164 unassigned_mem_readl,
3165 };
3166
3167 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3168 unassigned_mem_writeb,
3169 unassigned_mem_writew,
3170 unassigned_mem_writel,
3171 };
3172
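/* Slow-path RAM writes used while a page still contains translated code:
   invalidate the TBs on the page, perform the store, update the dirty
   flags, and drop back to the fast path once the page is fully dirty. */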
3173 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3174 uint32_t val)
3175 {
3176 int dirty_flags;
3177 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3178 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3179 #if !defined(CONFIG_USER_ONLY)
3180 tb_invalidate_phys_page_fast(ram_addr, 1);
3181 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3182 #endif
3183 }
3184 stb_p(qemu_get_ram_ptr(ram_addr), val);
3185 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3186 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3187 /* we remove the notdirty callback only if the code has been
3188 flushed */
3189 if (dirty_flags == 0xff)
3190 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3191 }
3192
3193 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3194 uint32_t val)
3195 {
3196 int dirty_flags;
3197 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3198 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3199 #if !defined(CONFIG_USER_ONLY)
3200 tb_invalidate_phys_page_fast(ram_addr, 2);
3201 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3202 #endif
3203 }
3204 stw_p(qemu_get_ram_ptr(ram_addr), val);
3205 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3206 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3207 /* we remove the notdirty callback only if the code has been
3208 flushed */
3209 if (dirty_flags == 0xff)
3210 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3211 }
3212
3213 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3214 uint32_t val)
3215 {
3216 int dirty_flags;
3217 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3218 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3219 #if !defined(CONFIG_USER_ONLY)
3220 tb_invalidate_phys_page_fast(ram_addr, 4);
3221 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3222 #endif
3223 }
3224 stl_p(qemu_get_ram_ptr(ram_addr), val);
3225 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3226 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3227 /* we remove the notdirty callback only if the code has been
3228 flushed */
3229 if (dirty_flags == 0xff)
3230 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3231 }
3232
3233 static CPUReadMemoryFunc * const error_mem_read[3] = {
3234 NULL, /* never used */
3235 NULL, /* never used */
3236 NULL, /* never used */
3237 };
3238
3239 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3240 notdirty_mem_writeb,
3241 notdirty_mem_writew,
3242 notdirty_mem_writel,
3243 };
3244
3245 /* Generate a debug exception if a watchpoint has been hit. */
3246 static void check_watchpoint(int offset, int len_mask, int flags)
3247 {
3248 CPUState *env = cpu_single_env;
3249 target_ulong pc, cs_base;
3250 TranslationBlock *tb;
3251 target_ulong vaddr;
3252 CPUWatchpoint *wp;
3253 int cpu_flags;
3254
3255 if (env->watchpoint_hit) {
3256 /* We re-entered the check after replacing the TB. Now raise
3257 * the debug interrupt so that it will trigger after the
3258 * current instruction. */
3259 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3260 return;
3261 }
3262 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3263 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3264 if ((vaddr == (wp->vaddr & len_mask) ||
3265 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3266 wp->flags |= BP_WATCHPOINT_HIT;
3267 if (!env->watchpoint_hit) {
3268 env->watchpoint_hit = wp;
3269 tb = tb_find_pc(env->mem_io_pc);
3270 if (!tb) {
3271 cpu_abort(env, "check_watchpoint: could not find TB for "
3272 "pc=%p", (void *)env->mem_io_pc);
3273 }
3274 cpu_restore_state(tb, env, env->mem_io_pc);
3275 tb_phys_invalidate(tb, -1);
3276 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3277 env->exception_index = EXCP_DEBUG;
3278 } else {
3279 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3280 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3281 }
3282 cpu_resume_from_signal(env, NULL);
3283 }
3284 } else {
3285 wp->flags &= ~BP_WATCHPOINT_HIT;
3286 }
3287 }
3288 }
3289
3290 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3291 so these check for a hit then pass through to the normal out-of-line
3292 phys routines. */
3293 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3294 {
3295 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3296 return ldub_phys(addr);
3297 }
3298
3299 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3300 {
3301 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3302 return lduw_phys(addr);
3303 }
3304
3305 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3306 {
3307 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3308 return ldl_phys(addr);
3309 }
3310
3311 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3312 uint32_t val)
3313 {
3314 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3315 stb_phys(addr, val);
3316 }
3317
3318 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3319 uint32_t val)
3320 {
3321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3322 stw_phys(addr, val);
3323 }
3324
3325 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3326 uint32_t val)
3327 {
3328 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3329 stl_phys(addr, val);
3330 }
3331
3332 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3333 watch_mem_readb,
3334 watch_mem_readw,
3335 watch_mem_readl,
3336 };
3337
3338 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3339 watch_mem_writeb,
3340 watch_mem_writew,
3341 watch_mem_writel,
3342 };
3343
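/* Dispatch a read within a subpage to the I/O handler registered for that
   offset; 'len' is the log2 access size index (0 = byte, 1 = word,
   2 = long). */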
3344 static inline uint32_t subpage_readlen (subpage_t *mmio,
3345 target_phys_addr_t addr,
3346 unsigned int len)
3347 {
3348 unsigned int idx = SUBPAGE_IDX(addr);
3349 #if defined(DEBUG_SUBPAGE)
3350 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3351 mmio, len, addr, idx);
3352 #endif
3353
3354 addr += mmio->region_offset[idx];
3355 idx = mmio->sub_io_index[idx];
3356 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3357 }
3358
3359 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3360 uint32_t value, unsigned int len)
3361 {
3362 unsigned int idx = SUBPAGE_IDX(addr);
3363 #if defined(DEBUG_SUBPAGE)
3364 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3365 __func__, mmio, len, addr, idx, value);
3366 #endif
3367
3368 addr += mmio->region_offset[idx];
3369 idx = mmio->sub_io_index[idx];
3370 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3371 }
3372
3373 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3374 {
3375 return subpage_readlen(opaque, addr, 0);
3376 }
3377
3378 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3379 uint32_t value)
3380 {
3381 subpage_writelen(opaque, addr, value, 0);
3382 }
3383
3384 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3385 {
3386 return subpage_readlen(opaque, addr, 1);
3387 }
3388
3389 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3390 uint32_t value)
3391 {
3392 subpage_writelen(opaque, addr, value, 1);
3393 }
3394
3395 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3396 {
3397 return subpage_readlen(opaque, addr, 2);
3398 }
3399
3400 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3401 uint32_t value)
3402 {
3403 subpage_writelen(opaque, addr, value, 2);
3404 }
3405
3406 static CPUReadMemoryFunc * const subpage_read[] = {
3407 &subpage_readb,
3408 &subpage_readw,
3409 &subpage_readl,
3410 };
3411
3412 static CPUWriteMemoryFunc * const subpage_write[] = {
3413 &subpage_writeb,
3414 &subpage_writew,
3415 &subpage_writel,
3416 };
3417
3418 static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3419 {
3420 ram_addr_t raddr = addr;
3421 void *ptr = qemu_get_ram_ptr(raddr);
3422 return ldub_p(ptr);
3423 }
3424
3425 static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3426 uint32_t value)
3427 {
3428 ram_addr_t raddr = addr;
3429 void *ptr = qemu_get_ram_ptr(raddr);
3430 stb_p(ptr, value);
3431 }
3432
3433 static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3434 {
3435 ram_addr_t raddr = addr;
3436 void *ptr = qemu_get_ram_ptr(raddr);
3437 return lduw_p(ptr);
3438 }
3439
3440 static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3441 uint32_t value)
3442 {
3443 ram_addr_t raddr = addr;
3444 void *ptr = qemu_get_ram_ptr(raddr);
3445 stw_p(ptr, value);
3446 }
3447
3448 static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3449 {
3450 ram_addr_t raddr = addr;
3451 void *ptr = qemu_get_ram_ptr(raddr);
3452 return ldl_p(ptr);
3453 }
3454
3455 static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3456 uint32_t value)
3457 {
3458 ram_addr_t raddr = addr;
3459 void *ptr = qemu_get_ram_ptr(raddr);
3460 stl_p(ptr, value);
3461 }
3462
3463 static CPUReadMemoryFunc * const subpage_ram_read[] = {
3464 &subpage_ram_readb,
3465 &subpage_ram_readw,
3466 &subpage_ram_readl,
3467 };
3468
3469 static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3470 &subpage_ram_writeb,
3471 &subpage_ram_writew,
3472 &subpage_ram_writel,
3473 };
3474
3475 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3476 ram_addr_t memory, ram_addr_t region_offset)
3477 {
3478 int idx, eidx;
3479
3480 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3481 return -1;
3482 idx = SUBPAGE_IDX(start);
3483 eidx = SUBPAGE_IDX(end);
3484 #if defined(DEBUG_SUBPAGE)
3485 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3486 mmio, start, end, idx, eidx, memory);
3487 #endif
3488 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3489 memory = IO_MEM_SUBPAGE_RAM;
3490 }
3491 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3492 for (; idx <= eidx; idx++) {
3493 mmio->sub_io_index[idx] = memory;
3494 mmio->region_offset[idx] = region_offset;
3495 }
3496
3497 return 0;
3498 }
3499
3500 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3501 ram_addr_t orig_memory,
3502 ram_addr_t region_offset)
3503 {
3504 subpage_t *mmio;
3505 int subpage_memory;
3506
3507 mmio = g_malloc0(sizeof(subpage_t));
3508
3509 mmio->base = base;
3510 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3511 #if defined(DEBUG_SUBPAGE)
3512 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3513 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3514 #endif
3515 *phys = subpage_memory | IO_MEM_SUBPAGE;
3516 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3517
3518 return mmio;
3519 }
3520
3521 static int get_free_io_mem_idx(void)
3522 {
3523 int i;
3524
3525 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3526 if (!io_mem_used[i]) {
3527 io_mem_used[i] = 1;
3528 return i;
3529 }
3530 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3531 return -1;
3532 }
3533
3534 /* mem_read and mem_write are arrays of functions containing the
3535 function to access byte (index 0), word (index 1) and dword (index
3536 2). Functions can be omitted with a NULL function pointer.
3537 If io_index is non-zero, the corresponding io zone is
3538 modified. If it is zero, a new io zone is allocated. The return
3539 value can be used with cpu_register_physical_memory(); -1 is
3540 returned on error. */
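/* Illustrative (hypothetical) usage from a device model:
 *   int idx = cpu_register_io_memory(my_read_fns, my_write_fns, s);
 *   cpu_register_physical_memory(base, 0x1000, idx);
 * where my_read_fns/my_write_fns and 's' are the device's own handler
 * tables and state, not anything defined in this file. */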
3541 static int cpu_register_io_memory_fixed(int io_index,
3542 CPUReadMemoryFunc * const *mem_read,
3543 CPUWriteMemoryFunc * const *mem_write,
3544 void *opaque)
3545 {
3546 int i;
3547
3548 if (io_index <= 0) {
3549 io_index = get_free_io_mem_idx();
3550 if (io_index == -1)
3551 return io_index;
3552 } else {
3553 io_index >>= IO_MEM_SHIFT;
3554 if (io_index >= IO_MEM_NB_ENTRIES)
3555 return -1;
3556 }
3557
3558 for (i = 0; i < 3; ++i) {
3559 io_mem_read[io_index][i]
3560 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3561 }
3562 for (i = 0; i < 3; ++i) {
3563 io_mem_write[io_index][i]
3564 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3565 }
3566 io_mem_opaque[io_index] = opaque;
3567
3568 return (io_index << IO_MEM_SHIFT);
3569 }
3570
3571 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3572 CPUWriteMemoryFunc * const *mem_write,
3573 void *opaque)
3574 {
3575 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3576 }
3577
3578 void cpu_unregister_io_memory(int io_table_address)
3579 {
3580 int i;
3581 int io_index = io_table_address >> IO_MEM_SHIFT;
3582
3583 for (i = 0; i < 3; i++) {
3584 io_mem_read[io_index][i] = unassigned_mem_read[i];
3585 io_mem_write[io_index][i] = unassigned_mem_write[i];
3586 }
3587 io_mem_opaque[io_index] = NULL;
3588 io_mem_used[io_index] = 0;
3589 }
3590
3591 static void io_mem_init(void)
3592 {
3593 int i;
3594
3595 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3596 unassigned_mem_write, NULL);
3597 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3598 unassigned_mem_write, NULL);
3599 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3600 notdirty_mem_write, NULL);
3601 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3602 subpage_ram_write, NULL);
3603 for (i=0; i<5; i++)
3604 io_mem_used[i] = 1;
3605
3606 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3607 watch_mem_write, NULL);
3608 }
3609
3610 static void memory_map_init(void)
3611 {
3612 system_memory = g_malloc(sizeof(*system_memory));
3613 memory_region_init(system_memory, "system", INT64_MAX);
3614 set_system_memory_map(system_memory);
3615
3616 system_io = g_malloc(sizeof(*system_io));
3617 memory_region_init(system_io, "io", 65536);
3618 set_system_io_map(system_io);
3619 }
3620
3621 MemoryRegion *get_system_memory(void)
3622 {
3623 return system_memory;
3624 }
3625
3626 MemoryRegion *get_system_io(void)
3627 {
3628 return system_io;
3629 }
3630
3631 #endif /* !defined(CONFIG_USER_ONLY) */
3632
3633 /* physical memory access (slow version, mainly for debug) */
3634 #if defined(CONFIG_USER_ONLY)
3635 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3636 uint8_t *buf, int len, int is_write)
3637 {
3638 int l, flags;
3639 target_ulong page;
3640 void * p;
3641
3642 while (len > 0) {
3643 page = addr & TARGET_PAGE_MASK;
3644 l = (page + TARGET_PAGE_SIZE) - addr;
3645 if (l > len)
3646 l = len;
3647 flags = page_get_flags(page);
3648 if (!(flags & PAGE_VALID))
3649 return -1;
3650 if (is_write) {
3651 if (!(flags & PAGE_WRITE))
3652 return -1;
3653 /* XXX: this code should not depend on lock_user */
3654 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3655 return -1;
3656 memcpy(p, buf, l);
3657 unlock_user(p, addr, l);
3658 } else {
3659 if (!(flags & PAGE_READ))
3660 return -1;
3661 /* XXX: this code should not depend on lock_user */
3662 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3663 return -1;
3664 memcpy(buf, p, l);
3665 unlock_user(p, addr, 0);
3666 }
3667 len -= l;
3668 buf += l;
3669 addr += l;
3670 }
3671 return 0;
3672 }
3673
3674 #else
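/* Copy 'len' bytes between guest physical memory and 'buf'. RAM pages are
   accessed with memcpy (invalidating translated code on writes); MMIO
   pages are split into 4/2/1-byte accesses through the registered I/O
   callbacks. */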
3675 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3676 int len, int is_write)
3677 {
3678 int l, io_index;
3679 uint8_t *ptr;
3680 uint32_t val;
3681 target_phys_addr_t page;
3682 ram_addr_t pd;
3683 PhysPageDesc *p;
3684
3685 while (len > 0) {
3686 page = addr & TARGET_PAGE_MASK;
3687 l = (page + TARGET_PAGE_SIZE) - addr;
3688 if (l > len)
3689 l = len;
3690 p = phys_page_find(page >> TARGET_PAGE_BITS);
3691 if (!p) {
3692 pd = IO_MEM_UNASSIGNED;
3693 } else {
3694 pd = p->phys_offset;
3695 }
3696
3697 if (is_write) {
3698 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3699 target_phys_addr_t addr1 = addr;
3700 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3701 if (p)
3702 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3703 /* XXX: could force cpu_single_env to NULL to avoid
3704 potential bugs */
3705 if (l >= 4 && ((addr1 & 3) == 0)) {
3706 /* 32 bit write access */
3707 val = ldl_p(buf);
3708 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3709 l = 4;
3710 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3711 /* 16 bit write access */
3712 val = lduw_p(buf);
3713 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3714 l = 2;
3715 } else {
3716 /* 8 bit write access */
3717 val = ldub_p(buf);
3718 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3719 l = 1;
3720 }
3721 } else {
3722 ram_addr_t addr1;
3723 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3724 /* RAM case */
3725 ptr = qemu_get_ram_ptr(addr1);
3726 memcpy(ptr, buf, l);
3727 if (!cpu_physical_memory_is_dirty(addr1)) {
3728 /* invalidate code */
3729 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3730 /* set dirty bit */
3731 cpu_physical_memory_set_dirty_flags(
3732 addr1, (0xff & ~CODE_DIRTY_FLAG));
3733 }
3734 qemu_put_ram_ptr(ptr);
3735 }
3736 } else {
3737 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3738 !(pd & IO_MEM_ROMD)) {
3739 target_phys_addr_t addr1 = addr;
3740 /* I/O case */
3741 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3742 if (p)
3743 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3744 if (l >= 4 && ((addr1 & 3) == 0)) {
3745 /* 32 bit read access */
3746 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3747 stl_p(buf, val);
3748 l = 4;
3749 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3750 /* 16 bit read access */
3751 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3752 stw_p(buf, val);
3753 l = 2;
3754 } else {
3755 /* 8 bit read access */
3756 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3757 stb_p(buf, val);
3758 l = 1;
3759 }
3760 } else {
3761 /* RAM case */
3762 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3763 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3764 qemu_put_ram_ptr(ptr);
3765 }
3766 }
3767 len -= l;
3768 buf += l;
3769 addr += l;
3770 }
3771 }
3772
3773 /* used for ROM loading : can write in RAM and ROM */
3774 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3775 const uint8_t *buf, int len)
3776 {
3777 int l;
3778 uint8_t *ptr;
3779 target_phys_addr_t page;
3780 unsigned long pd;
3781 PhysPageDesc *p;
3782
3783 while (len > 0) {
3784 page = addr & TARGET_PAGE_MASK;
3785 l = (page + TARGET_PAGE_SIZE) - addr;
3786 if (l > len)
3787 l = len;
3788 p = phys_page_find(page >> TARGET_PAGE_BITS);
3789 if (!p) {
3790 pd = IO_MEM_UNASSIGNED;
3791 } else {
3792 pd = p->phys_offset;
3793 }
3794
3795 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3796 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3797 !(pd & IO_MEM_ROMD)) {
3798 /* do nothing */
3799 } else {
3800 unsigned long addr1;
3801 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3802 /* ROM/RAM case */
3803 ptr = qemu_get_ram_ptr(addr1);
3804 memcpy(ptr, buf, l);
3805 qemu_put_ram_ptr(ptr);
3806 }
3807 len -= l;
3808 buf += l;
3809 addr += l;
3810 }
3811 }
3812
3813 typedef struct {
3814 void *buffer;
3815 target_phys_addr_t addr;
3816 target_phys_addr_t len;
3817 } BounceBuffer;
3818
3819 static BounceBuffer bounce;
3820
3821 typedef struct MapClient {
3822 void *opaque;
3823 void (*callback)(void *opaque);
3824 QLIST_ENTRY(MapClient) link;
3825 } MapClient;
3826
3827 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3828 = QLIST_HEAD_INITIALIZER(map_client_list);
3829
3830 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3831 {
3832 MapClient *client = g_malloc(sizeof(*client));
3833
3834 client->opaque = opaque;
3835 client->callback = callback;
3836 QLIST_INSERT_HEAD(&map_client_list, client, link);
3837 return client;
3838 }
3839
3840 void cpu_unregister_map_client(void *_client)
3841 {
3842 MapClient *client = (MapClient *)_client;
3843
3844 QLIST_REMOVE(client, link);
3845 g_free(client);
3846 }
3847
3848 static void cpu_notify_map_clients(void)
3849 {
3850 MapClient *client;
3851
3852 while (!QLIST_EMPTY(&map_client_list)) {
3853 client = QLIST_FIRST(&map_client_list);
3854 client->callback(client->opaque);
3855 cpu_unregister_map_client(client);
3856 }
3857 }
3858
3859 /* Map a physical memory region into a host virtual address.
3860 * May map a subset of the requested range, given by and returned in *plen.
3861 * May return NULL if resources needed to perform the mapping are exhausted.
3862 * Use only for reads OR writes - not for read-modify-write operations.
3863 * Use cpu_register_map_client() to know when retrying the map operation is
3864 * likely to succeed.
3865 */
3866 void *cpu_physical_memory_map(target_phys_addr_t addr,
3867 target_phys_addr_t *plen,
3868 int is_write)
3869 {
3870 target_phys_addr_t len = *plen;
3871 target_phys_addr_t todo = 0;
3872 int l;
3873 target_phys_addr_t page;
3874 unsigned long pd;
3875 PhysPageDesc *p;
3876 ram_addr_t raddr = RAM_ADDR_MAX;
3877 ram_addr_t rlen;
3878 void *ret;
3879
3880 while (len > 0) {
3881 page = addr & TARGET_PAGE_MASK;
3882 l = (page + TARGET_PAGE_SIZE) - addr;
3883 if (l > len)
3884 l = len;
3885 p = phys_page_find(page >> TARGET_PAGE_BITS);
3886 if (!p) {
3887 pd = IO_MEM_UNASSIGNED;
3888 } else {
3889 pd = p->phys_offset;
3890 }
3891
3892 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3893 if (todo || bounce.buffer) {
3894 break;
3895 }
3896 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3897 bounce.addr = addr;
3898 bounce.len = l;
3899 if (!is_write) {
3900 cpu_physical_memory_read(addr, bounce.buffer, l);
3901 }
3902
3903 *plen = l;
3904 return bounce.buffer;
3905 }
3906 if (!todo) {
3907 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3908 }
3909
3910 len -= l;
3911 addr += l;
3912 todo += l;
3913 }
3914 rlen = todo;
3915 ret = qemu_ram_ptr_length(raddr, &rlen);
3916 *plen = rlen;
3917 return ret;
3918 }
3919
3920 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3921 * Will also mark the memory as dirty if is_write == 1. access_len gives
3922 * the amount of memory that was actually read or written by the caller.
3923 */
3924 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3925 int is_write, target_phys_addr_t access_len)
3926 {
3927 if (buffer != bounce.buffer) {
3928 if (is_write) {
3929 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3930 while (access_len) {
3931 unsigned l;
3932 l = TARGET_PAGE_SIZE;
3933 if (l > access_len)
3934 l = access_len;
3935 if (!cpu_physical_memory_is_dirty(addr1)) {
3936 /* invalidate code */
3937 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3938 /* set dirty bit */
3939 cpu_physical_memory_set_dirty_flags(
3940 addr1, (0xff & ~CODE_DIRTY_FLAG));
3941 }
3942 addr1 += l;
3943 access_len -= l;
3944 }
3945 }
3946 if (xen_enabled()) {
3947 xen_invalidate_map_cache_entry(buffer);
3948 }
3949 return;
3950 }
3951 if (is_write) {
3952 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3953 }
3954 qemu_vfree(bounce.buffer);
3955 bounce.buffer = NULL;
3956 cpu_notify_map_clients();
3957 }
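/* Usage sketch (illustrative only): a complete map/copy/unmap round trip on
 * the read side.  *plen can come back smaller than requested (the range may
 * cross into non-RAM memory), and the access_len passed to
 * cpu_physical_memory_unmap() should cover only the bytes actually touched
 * so dirty tracking and TB invalidation stay precise.  copy_from_guest() is
 * a hypothetical helper name.
 */
#if 0
static int copy_from_guest(void *dst, target_phys_addr_t addr,
                           target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (!host) {
        return -1;                       /* bounce buffer busy, retry later */
    }
    memcpy(dst, host, plen);             /* possibly a partial copy */
    cpu_physical_memory_unmap(host, plen, 0, plen /* bytes actually read */);
    return plen == len ? 0 : 1;          /* 1: only a prefix was mapped */
}
#endif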
3958
3959 /* warning: addr must be aligned */
3960 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3961 enum device_endian endian)
3962 {
3963 int io_index;
3964 uint8_t *ptr;
3965 uint32_t val;
3966 unsigned long pd;
3967 PhysPageDesc *p;
3968
3969 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3970 if (!p) {
3971 pd = IO_MEM_UNASSIGNED;
3972 } else {
3973 pd = p->phys_offset;
3974 }
3975
3976 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3977 !(pd & IO_MEM_ROMD)) {
3978 /* I/O case */
3979 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3980 if (p)
3981 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3982 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3983 #if defined(TARGET_WORDS_BIGENDIAN)
3984 if (endian == DEVICE_LITTLE_ENDIAN) {
3985 val = bswap32(val);
3986 }
3987 #else
3988 if (endian == DEVICE_BIG_ENDIAN) {
3989 val = bswap32(val);
3990 }
3991 #endif
3992 } else {
3993 /* RAM case */
3994 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3995 (addr & ~TARGET_PAGE_MASK);
3996 switch (endian) {
3997 case DEVICE_LITTLE_ENDIAN:
3998 val = ldl_le_p(ptr);
3999 break;
4000 case DEVICE_BIG_ENDIAN:
4001 val = ldl_be_p(ptr);
4002 break;
4003 default:
4004 val = ldl_p(ptr);
4005 break;
4006 }
4007 }
4008 return val;
4009 }
4010
4011 uint32_t ldl_phys(target_phys_addr_t addr)
4012 {
4013 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4014 }
4015
4016 uint32_t ldl_le_phys(target_phys_addr_t addr)
4017 {
4018 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4019 }
4020
4021 uint32_t ldl_be_phys(target_phys_addr_t addr)
4022 {
4023 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4024 }
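/* Usage sketch (illustrative only): choosing between the endian-specific
 * accessors.  A device or in-memory table whose layout is defined as
 * little-endian should be read with the _le_ variants so the result is
 * correct whatever the target's native byte order; ldl_phys() keeps
 * target-native order.  desc_addr and the field offsets are hypothetical.
 */
#if 0
static void read_descriptor_example(target_phys_addr_t desc_addr)
{
    uint32_t le_field = ldl_le_phys(desc_addr);      /* little-endian field */
    uint32_t be_field = ldl_be_phys(desc_addr + 4);  /* big-endian field */
    uint32_t raw      = ldl_phys(desc_addr + 8);     /* target-native order */

    (void)le_field; (void)be_field; (void)raw;
}
#endif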
4025
4026 /* warning: addr must be aligned */
4027 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4028 enum device_endian endian)
4029 {
4030 int io_index;
4031 uint8_t *ptr;
4032 uint64_t val;
4033 unsigned long pd;
4034 PhysPageDesc *p;
4035
4036 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4037 if (!p) {
4038 pd = IO_MEM_UNASSIGNED;
4039 } else {
4040 pd = p->phys_offset;
4041 }
4042
4043 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4044 !(pd & IO_MEM_ROMD)) {
4045 /* I/O case */
4046 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4047 if (p)
4048 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4049
4050 /* XXX This is broken when device endian != cpu endian.
4051 Fix this and check the "endian" argument. */
4052 #ifdef TARGET_WORDS_BIGENDIAN
4053 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4054 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4055 #else
4056 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4057 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4058 #endif
4059 } else {
4060 /* RAM case */
4061 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4062 (addr & ~TARGET_PAGE_MASK);
4063 switch (endian) {
4064 case DEVICE_LITTLE_ENDIAN:
4065 val = ldq_le_p(ptr);
4066 break;
4067 case DEVICE_BIG_ENDIAN:
4068 val = ldq_be_p(ptr);
4069 break;
4070 default:
4071 val = ldq_p(ptr);
4072 break;
4073 }
4074 }
4075 return val;
4076 }
4077
4078 uint64_t ldq_phys(target_phys_addr_t addr)
4079 {
4080 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4081 }
4082
4083 uint64_t ldq_le_phys(target_phys_addr_t addr)
4084 {
4085 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4086 }
4087
4088 uint64_t ldq_be_phys(target_phys_addr_t addr)
4089 {
4090 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4091 }
4092
4093 /* XXX: optimize */
4094 uint32_t ldub_phys(target_phys_addr_t addr)
4095 {
4096 uint8_t val;
4097 cpu_physical_memory_read(addr, &val, 1);
4098 return val;
4099 }
4100
4101 /* warning: addr must be aligned */
4102 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4103 enum device_endian endian)
4104 {
4105 int io_index;
4106 uint8_t *ptr;
4107 uint32_t val;
4108 unsigned long pd;
4109 PhysPageDesc *p;
4110
4111 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4112 if (!p) {
4113 pd = IO_MEM_UNASSIGNED;
4114 } else {
4115 pd = p->phys_offset;
4116 }
4117
4118 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4119 !(pd & IO_MEM_ROMD)) {
4120 /* I/O case */
4121 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4122 if (p)
4123 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4124 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4125 #if defined(TARGET_WORDS_BIGENDIAN)
4126 if (endian == DEVICE_LITTLE_ENDIAN) {
4127 val = bswap16(val);
4128 }
4129 #else
4130 if (endian == DEVICE_BIG_ENDIAN) {
4131 val = bswap16(val);
4132 }
4133 #endif
4134 } else {
4135 /* RAM case */
4136 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4137 (addr & ~TARGET_PAGE_MASK);
4138 switch (endian) {
4139 case DEVICE_LITTLE_ENDIAN:
4140 val = lduw_le_p(ptr);
4141 break;
4142 case DEVICE_BIG_ENDIAN:
4143 val = lduw_be_p(ptr);
4144 break;
4145 default:
4146 val = lduw_p(ptr);
4147 break;
4148 }
4149 }
4150 return val;
4151 }
4152
4153 uint32_t lduw_phys(target_phys_addr_t addr)
4154 {
4155 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4156 }
4157
4158 uint32_t lduw_le_phys(target_phys_addr_t addr)
4159 {
4160 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4161 }
4162
4163 uint32_t lduw_be_phys(target_phys_addr_t addr)
4164 {
4165 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4166 }
4167
4168 /* warning: addr must be aligned. The RAM page is not marked as dirty
4169 and the code inside is not invalidated. This is useful if the dirty
4170 bits are used to track modified PTEs */
4171 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4172 {
4173 int io_index;
4174 uint8_t *ptr;
4175 unsigned long pd;
4176 PhysPageDesc *p;
4177
4178 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4179 if (!p) {
4180 pd = IO_MEM_UNASSIGNED;
4181 } else {
4182 pd = p->phys_offset;
4183 }
4184
4185 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4186 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4187 if (p)
4188 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4189 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4190 } else {
4191 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4192 ptr = qemu_get_ram_ptr(addr1);
4193 stl_p(ptr, val);
4194
4195 if (unlikely(in_migration)) {
4196 if (!cpu_physical_memory_is_dirty(addr1)) {
4197 /* invalidate code */
4198 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4199 /* set dirty bit */
4200 cpu_physical_memory_set_dirty_flags(
4201 addr1, (0xff & ~CODE_DIRTY_FLAG));
4202 }
4203 }
4204 }
4205 }
4206
4207 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4208 {
4209 int io_index;
4210 uint8_t *ptr;
4211 unsigned long pd;
4212 PhysPageDesc *p;
4213
4214 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4215 if (!p) {
4216 pd = IO_MEM_UNASSIGNED;
4217 } else {
4218 pd = p->phys_offset;
4219 }
4220
4221 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4222 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4223 if (p)
4224 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4225 #ifdef TARGET_WORDS_BIGENDIAN
4226 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4227 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4228 #else
4229 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4230 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4231 #endif
4232 } else {
4233 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4234 (addr & ~TARGET_PAGE_MASK);
4235 stq_p(ptr, val);
4236 }
4237 }
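/* Usage sketch (illustrative only): when to prefer stl_phys_notdirty() over
 * stl_phys().  A target MMU helper that updates the accessed/dirty bits of a
 * guest page-table entry wants the store to reach RAM without marking the
 * page dirty or invalidating translated code, because the dirty bitmap may
 * itself be used to detect guest PTE modifications.  PTE_ACCESSED_BIT is a
 * hypothetical constant.
 */
#if 0
#define PTE_ACCESSED_BIT 0x20

static void set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_BIT);
}
#endif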
4238
4239 /* warning: addr must be aligned */
4240 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4241 enum device_endian endian)
4242 {
4243 int io_index;
4244 uint8_t *ptr;
4245 unsigned long pd;
4246 PhysPageDesc *p;
4247
4248 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4249 if (!p) {
4250 pd = IO_MEM_UNASSIGNED;
4251 } else {
4252 pd = p->phys_offset;
4253 }
4254
4255 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4256 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4257 if (p)
4258 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4259 #if defined(TARGET_WORDS_BIGENDIAN)
4260 if (endian == DEVICE_LITTLE_ENDIAN) {
4261 val = bswap32(val);
4262 }
4263 #else
4264 if (endian == DEVICE_BIG_ENDIAN) {
4265 val = bswap32(val);
4266 }
4267 #endif
4268 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4269 } else {
4270 unsigned long addr1;
4271 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4272 /* RAM case */
4273 ptr = qemu_get_ram_ptr(addr1);
4274 switch (endian) {
4275 case DEVICE_LITTLE_ENDIAN:
4276 stl_le_p(ptr, val);
4277 break;
4278 case DEVICE_BIG_ENDIAN:
4279 stl_be_p(ptr, val);
4280 break;
4281 default:
4282 stl_p(ptr, val);
4283 break;
4284 }
4285 if (!cpu_physical_memory_is_dirty(addr1)) {
4286 /* invalidate code */
4287 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4288 /* set dirty bit */
4289 cpu_physical_memory_set_dirty_flags(addr1,
4290 (0xff & ~CODE_DIRTY_FLAG));
4291 }
4292 }
4293 }
4294
4295 void stl_phys(target_phys_addr_t addr, uint32_t val)
4296 {
4297 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4298 }
4299
4300 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4301 {
4302 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4303 }
4304
4305 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4306 {
4307 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4308 }
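/* Usage sketch (illustrative only): in contrast to the _notdirty stores
 * above, stl_phys()/stl_le_phys()/stl_be_phys() mark the RAM page dirty and
 * invalidate any translated code overlapping the word, which is what a
 * device model wants when it posts a status word the guest will read.
 * ring_addr and the DONE value are hypothetical.
 */
#if 0
static void post_completion(target_phys_addr_t ring_addr)
{
    stl_le_phys(ring_addr, 0x1 /* DONE, ring layout is little-endian */);
}
#endif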
4309
4310 /* XXX: optimize */
4311 void stb_phys(target_phys_addr_t addr, uint32_t val)
4312 {
4313 uint8_t v = val;
4314 cpu_physical_memory_write(addr, &v, 1);
4315 }
4316
4317 /* warning: addr must be aligned */
4318 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4319 enum device_endian endian)
4320 {
4321 int io_index;
4322 uint8_t *ptr;
4323 unsigned long pd;
4324 PhysPageDesc *p;
4325
4326 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4327 if (!p) {
4328 pd = IO_MEM_UNASSIGNED;
4329 } else {
4330 pd = p->phys_offset;
4331 }
4332
4333 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4334 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4335 if (p)
4336 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4337 #if defined(TARGET_WORDS_BIGENDIAN)
4338 if (endian == DEVICE_LITTLE_ENDIAN) {
4339 val = bswap16(val);
4340 }
4341 #else
4342 if (endian == DEVICE_BIG_ENDIAN) {
4343 val = bswap16(val);
4344 }
4345 #endif
4346 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4347 } else {
4348 unsigned long addr1;
4349 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4350 /* RAM case */
4351 ptr = qemu_get_ram_ptr(addr1);
4352 switch (endian) {
4353 case DEVICE_LITTLE_ENDIAN:
4354 stw_le_p(ptr, val);
4355 break;
4356 case DEVICE_BIG_ENDIAN:
4357 stw_be_p(ptr, val);
4358 break;
4359 default:
4360 stw_p(ptr, val);
4361 break;
4362 }
4363 if (!cpu_physical_memory_is_dirty(addr1)) {
4364 /* invalidate code */
4365 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4366 /* set dirty bit */
4367 cpu_physical_memory_set_dirty_flags(addr1,
4368 (0xff & ~CODE_DIRTY_FLAG));
4369 }
4370 }
4371 }
4372
4373 void stw_phys(target_phys_addr_t addr, uint32_t val)
4374 {
4375 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4376 }
4377
4378 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4379 {
4380 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4381 }
4382
4383 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4384 {
4385 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4386 }
4387
4388 /* XXX: optimize */
4389 void stq_phys(target_phys_addr_t addr, uint64_t val)
4390 {
4391 val = tswap64(val);
4392 cpu_physical_memory_write(addr, &val, 8);
4393 }
4394
4395 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4396 {
4397 val = cpu_to_le64(val);
4398 cpu_physical_memory_write(addr, &val, 8);
4399 }
4400
4401 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4402 {
4403 val = cpu_to_be64(val);
4404 cpu_physical_memory_write(addr, &val, 8);
4405 }
4406
4407 /* virtual memory access for debug (includes writing to ROM) */
4408 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4409 uint8_t *buf, int len, int is_write)
4410 {
4411 int l;
4412 target_phys_addr_t phys_addr;
4413 target_ulong page;
4414
4415 while (len > 0) {
4416 page = addr & TARGET_PAGE_MASK;
4417 phys_addr = cpu_get_phys_page_debug(env, page);
4418 /* if no physical page mapped, return an error */
4419 if (phys_addr == -1)
4420 return -1;
4421 l = (page + TARGET_PAGE_SIZE) - addr;
4422 if (l > len)
4423 l = len;
4424 phys_addr += (addr & ~TARGET_PAGE_MASK);
4425 if (is_write)
4426 cpu_physical_memory_write_rom(phys_addr, buf, l);
4427 else
4428 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4429 len -= l;
4430 buf += l;
4431 addr += l;
4432 }
4433 return 0;
4434 }
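/* Usage sketch (illustrative only): cpu_memory_rw_debug() takes guest
 * *virtual* addresses and may write into ROM, which makes it the right
 * primitive for debugger-style accesses (gdb stub, monitor commands).
 * fetch_guest_insn() is a hypothetical helper.
 */
#if 0
static int fetch_guest_insn(CPUState *env, target_ulong pc, uint8_t *buf)
{
    /* Returns -1 if no physical page is mapped at that virtual address. */
    return cpu_memory_rw_debug(env, pc, buf, 4, 0 /* read */);
}
#endif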
4435 #endif
4436
4437 /* in deterministic execution mode, instructions doing device I/Os
4438 must be at the end of the TB */
4439 void cpu_io_recompile(CPUState *env, void *retaddr)
4440 {
4441 TranslationBlock *tb;
4442 uint32_t n, cflags;
4443 target_ulong pc, cs_base;
4444 uint64_t flags;
4445
4446 tb = tb_find_pc((unsigned long)retaddr);
4447 if (!tb) {
4448 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4449 retaddr);
4450 }
4451 n = env->icount_decr.u16.low + tb->icount;
4452 cpu_restore_state(tb, env, (unsigned long)retaddr);
4453 /* Calculate how many instructions had been executed before the fault
4454 occurred. */
4455 n = n - env->icount_decr.u16.low;
4456 /* Generate a new TB ending on the I/O insn. */
4457 n++;
4458 /* On MIPS and SH, delay slot instructions can only be restarted if
4459 they were already the first instruction in the TB. If this is not
4460 the first instruction in a TB then re-execute the preceding
4461 branch. */
4462 #if defined(TARGET_MIPS)
4463 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4464 env->active_tc.PC -= 4;
4465 env->icount_decr.u16.low++;
4466 env->hflags &= ~MIPS_HFLAG_BMASK;
4467 }
4468 #elif defined(TARGET_SH4)
4469 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4470 && n > 1) {
4471 env->pc -= 2;
4472 env->icount_decr.u16.low++;
4473 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4474 }
4475 #endif
4476 /* This should never happen. */
4477 if (n > CF_COUNT_MASK)
4478 cpu_abort(env, "TB too big during recompile");
4479
4480 cflags = n | CF_LAST_IO;
4481 pc = tb->pc;
4482 cs_base = tb->cs_base;
4483 flags = tb->flags;
4484 tb_phys_invalidate(tb, -1);
4485 /* FIXME: In theory this could raise an exception. In practice
4486 we have already translated the block once so it's probably ok. */
4487 tb_gen_code(env, pc, cs_base, flags, cflags);
4488 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4489 the first in the TB) then we end up generating a whole new TB and
4490 repeating the fault, which is horribly inefficient.
4491 Better would be to execute just this insn uncached, or generate a
4492 second new TB. */
4493 cpu_resume_from_signal(env, NULL);
4494 }
4495
4496 #if !defined(CONFIG_USER_ONLY)
4497
4498 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4499 {
4500 int i, target_code_size, max_target_code_size;
4501 int direct_jmp_count, direct_jmp2_count, cross_page;
4502 TranslationBlock *tb;
4503
4504 target_code_size = 0;
4505 max_target_code_size = 0;
4506 cross_page = 0;
4507 direct_jmp_count = 0;
4508 direct_jmp2_count = 0;
4509 for(i = 0; i < nb_tbs; i++) {
4510 tb = &tbs[i];
4511 target_code_size += tb->size;
4512 if (tb->size > max_target_code_size)
4513 max_target_code_size = tb->size;
4514 if (tb->page_addr[1] != -1)
4515 cross_page++;
4516 if (tb->tb_next_offset[0] != 0xffff) {
4517 direct_jmp_count++;
4518 if (tb->tb_next_offset[1] != 0xffff) {
4519 direct_jmp2_count++;
4520 }
4521 }
4522 }
4523 /* XXX: avoid using doubles ? */
4524 cpu_fprintf(f, "Translation buffer state:\n");
4525 cpu_fprintf(f, "gen code size %td/%ld\n",
4526 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4527 cpu_fprintf(f, "TB count %d/%d\n",
4528 nb_tbs, code_gen_max_blocks);
4529 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4530 nb_tbs ? target_code_size / nb_tbs : 0,
4531 max_target_code_size);
4532 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4533 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4534 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4535 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4536 cross_page,
4537 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4538 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4539 direct_jmp_count,
4540 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4541 direct_jmp2_count,
4542 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4543 cpu_fprintf(f, "\nStatistics:\n");
4544 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4545 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4546 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4547 tcg_dump_info(f, cpu_fprintf);
4548 }
4549
4550 #define MMUSUFFIX _cmmu
4551 #undef GETPC
4552 #define GETPC() NULL
4553 #define env cpu_single_env
4554 #define SOFTMMU_CODE_ACCESS
4555
4556 #define SHIFT 0
4557 #include "softmmu_template.h"
4558
4559 #define SHIFT 1
4560 #include "softmmu_template.h"
4561
4562 #define SHIFT 2
4563 #include "softmmu_template.h"
4564
4565 #define SHIFT 3
4566 #include "softmmu_template.h"
4567
4568 #undef env
4569
4570 #endif