1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "tcg.h"
31 #include "hw/hw.h"
32 #include "hw/qdev.h"
33 #include "osdep.h"
34 #include "kvm.h"
35 #include "hw/xen.h"
36 #include "qemu-timer.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 #include <sys/param.h>
41 #if __FreeBSD_version >= 700104
42 #define HAVE_KINFO_GETVMMAP
43 #define sigqueue sigqueue_freebsd /* avoid redefinition */
44 #include <sys/time.h>
45 #include <sys/proc.h>
46 #include <machine/profile.h>
47 #define _KERNEL
48 #include <sys/user.h>
49 #undef _KERNEL
50 #undef sigqueue
51 #include <libutil.h>
52 #endif
53 #endif
54 #else /* !CONFIG_USER_ONLY */
55 #include "xen-mapcache.h"
56 #include "trace.h"
57 #endif
58
59 //#define DEBUG_TB_INVALIDATE
60 //#define DEBUG_FLUSH
61 //#define DEBUG_TLB
62 //#define DEBUG_UNASSIGNED
63
64 /* make various TB consistency checks */
65 //#define DEBUG_TB_CHECK
66 //#define DEBUG_TLB_CHECK
67
68 //#define DEBUG_IOPORT
69 //#define DEBUG_SUBPAGE
70
71 #if !defined(CONFIG_USER_ONLY)
72 /* TB consistency checks only implemented for usermode emulation. */
73 #undef DEBUG_TB_CHECK
74 #endif
75
76 #define SMC_BITMAP_USE_THRESHOLD 10
77
78 static TranslationBlock *tbs;
79 static int code_gen_max_blocks;
80 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
81 static int nb_tbs;
82 /* any access to the tbs or the page table must use this lock */
83 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
84
85 #if defined(__arm__) || defined(__sparc_v9__)
86 /* The prologue must be reachable with a direct jump. ARM and Sparc64
87    have limited branch ranges (possibly also PPC), so place it in a
88    section close to the code segment. */
89 #define code_gen_section \
90 __attribute__((__section__(".gen_code"))) \
91 __attribute__((aligned (32)))
92 #elif defined(_WIN32)
93 /* Maximum alignment for Win32 is 16. */
94 #define code_gen_section \
95 __attribute__((aligned (16)))
96 #else
97 #define code_gen_section \
98 __attribute__((aligned (32)))
99 #endif
100
101 uint8_t code_gen_prologue[1024] code_gen_section;
102 static uint8_t *code_gen_buffer;
103 static unsigned long code_gen_buffer_size;
104 /* threshold to flush the translated code buffer */
105 static unsigned long code_gen_buffer_max_size;
106 static uint8_t *code_gen_ptr;
107
108 #if !defined(CONFIG_USER_ONLY)
109 int phys_ram_fd;
110 static int in_migration;
111
112 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
113 #endif
114
115 CPUState *first_cpu;
116 /* current CPU in the current thread. It is only valid inside
117 cpu_exec() */
118 CPUState *cpu_single_env;
119 /* 0 = Do not count executed instructions.
120 1 = Precise instruction counting.
121 2 = Adaptive rate instruction counting. */
122 int use_icount = 0;
123 /* Current instruction counter. While executing translated code this may
124 include some instructions that have not yet been executed. */
125 int64_t qemu_icount;
126
127 typedef struct PageDesc {
128 /* list of TBs intersecting this ram page */
129 TranslationBlock *first_tb;
130     /* in order to optimize self-modifying code, we count the number
131        of write accesses to a given page so we know when to switch to a bitmap */
132 unsigned int code_write_count;
133 uint8_t *code_bitmap;
134 #if defined(CONFIG_USER_ONLY)
135 unsigned long flags;
136 #endif
137 } PageDesc;
138
139 /* In system mode we want L1_MAP to be based on ram offsets,
140 while in user mode we want it to be based on virtual addresses. */
141 #if !defined(CONFIG_USER_ONLY)
142 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
143 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
144 #else
145 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
146 #endif
147 #else
148 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
149 #endif
150
151 /* Size of the L2 (and L3, etc) page tables. */
152 #define L2_BITS 10
153 #define L2_SIZE (1 << L2_BITS)
154
155 /* The bits remaining after N lower levels of page tables. */
156 #define P_L1_BITS_REM \
157 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
158 #define V_L1_BITS_REM \
159 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160
161 /* Size of the L1 page table. Avoid silly small sizes. */
162 #if P_L1_BITS_REM < 4
163 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
164 #else
165 #define P_L1_BITS P_L1_BITS_REM
166 #endif
167
168 #if V_L1_BITS_REM < 4
169 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
170 #else
171 #define V_L1_BITS V_L1_BITS_REM
172 #endif
173
174 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
175 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176
177 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
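/* Illustrative example (the actual values depend on the configured host and
   target): with TARGET_PHYS_ADDR_SPACE_BITS == 36, TARGET_PAGE_BITS == 12
   and L2_BITS == 10, P_L1_BITS_REM == (36 - 12) % 10 == 4, so P_L1_BITS == 4
   and P_L1_SHIFT == 36 - 12 - 4 == 20: a 16-entry L1 table followed by two
   1024-entry levels, together covering all 24 page-number bits. */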
179
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_bits;
182 unsigned long qemu_host_page_size;
183 unsigned long qemu_host_page_mask;
184
185 /* This is a multi-level map on the virtual address space.
186 The bottom level has pointers to PageDesc. */
187 static void *l1_map[V_L1_SIZE];
188
189 #if !defined(CONFIG_USER_ONLY)
190 typedef struct PhysPageDesc {
191 /* offset in host memory of the page + io_index in the low bits */
192 ram_addr_t phys_offset;
193 ram_addr_t region_offset;
194 } PhysPageDesc;
195
196 /* This is a multi-level map on the physical address space.
197 The bottom level has pointers to PhysPageDesc. */
198 static void *l1_phys_map[P_L1_SIZE];
199
200 static void io_mem_init(void);
201
202 /* io memory support */
203 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
204 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
205 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
206 static char io_mem_used[IO_MEM_NB_ENTRIES];
207 static int io_mem_watch;
208 #endif
209
210 /* log support */
211 #ifdef WIN32
212 static const char *logfilename = "qemu.log";
213 #else
214 static const char *logfilename = "/tmp/qemu.log";
215 #endif
216 FILE *logfile;
217 int loglevel;
218 static int log_append = 0;
219
220 /* statistics */
221 #if !defined(CONFIG_USER_ONLY)
222 static int tlb_flush_count;
223 #endif
224 static int tb_flush_count;
225 static int tb_phys_invalidate_count;
226
227 #ifdef _WIN32
228 static void map_exec(void *addr, long size)
229 {
230 DWORD old_protect;
231 VirtualProtect(addr, size,
232 PAGE_EXECUTE_READWRITE, &old_protect);
233
234 }
235 #else
236 static void map_exec(void *addr, long size)
237 {
238 unsigned long start, end, page_size;
239
240 page_size = getpagesize();
241 start = (unsigned long)addr;
242 start &= ~(page_size - 1);
243
244 end = (unsigned long)addr + size;
245 end += page_size - 1;
246 end &= ~(page_size - 1);
247
248 mprotect((void *)start, end - start,
249 PROT_READ | PROT_WRITE | PROT_EXEC);
250 }
251 #endif
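/* map_exec() is used below to make both code_gen_prologue and the code
   generation buffer executable; mprotect()/VirtualProtect() act on whole
   host pages, hence the rounding to page boundaries above. */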
252
253 static void page_init(void)
254 {
255     /* NOTE: we can always assume that qemu_host_page_size >=
256        TARGET_PAGE_SIZE */
257 #ifdef _WIN32
258 {
259 SYSTEM_INFO system_info;
260
261 GetSystemInfo(&system_info);
262 qemu_real_host_page_size = system_info.dwPageSize;
263 }
264 #else
265 qemu_real_host_page_size = getpagesize();
266 #endif
267 if (qemu_host_page_size == 0)
268 qemu_host_page_size = qemu_real_host_page_size;
269 if (qemu_host_page_size < TARGET_PAGE_SIZE)
270 qemu_host_page_size = TARGET_PAGE_SIZE;
271 qemu_host_page_bits = 0;
272 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
273 qemu_host_page_bits++;
274 qemu_host_page_mask = ~(qemu_host_page_size - 1);
275
276 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
277 {
278 #ifdef HAVE_KINFO_GETVMMAP
279 struct kinfo_vmentry *freep;
280 int i, cnt;
281
282 freep = kinfo_getvmmap(getpid(), &cnt);
283 if (freep) {
284 mmap_lock();
285 for (i = 0; i < cnt; i++) {
286 unsigned long startaddr, endaddr;
287
288 startaddr = freep[i].kve_start;
289 endaddr = freep[i].kve_end;
290 if (h2g_valid(startaddr)) {
291 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
292
293 if (h2g_valid(endaddr)) {
294 endaddr = h2g(endaddr);
295 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
296 } else {
297 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
298 endaddr = ~0ul;
299 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
300 #endif
301 }
302 }
303 }
304 free(freep);
305 mmap_unlock();
306 }
307 #else
308 FILE *f;
309
310 last_brk = (unsigned long)sbrk(0);
311
312 f = fopen("/compat/linux/proc/self/maps", "r");
313 if (f) {
314 mmap_lock();
315
316 do {
317 unsigned long startaddr, endaddr;
318 int n;
319
320 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
321
322 if (n == 2 && h2g_valid(startaddr)) {
323 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
324
325 if (h2g_valid(endaddr)) {
326 endaddr = h2g(endaddr);
327 } else {
328 endaddr = ~0ul;
329 }
330 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
331 }
332 } while (!feof(f));
333
334 fclose(f);
335 mmap_unlock();
336 }
337 #endif
338 }
339 #endif
340 }
341
342 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
343 {
344 PageDesc *pd;
345 void **lp;
346 int i;
347
348 #if defined(CONFIG_USER_ONLY)
349 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
350 # define ALLOC(P, SIZE) \
351 do { \
352 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
353 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
354 } while (0)
355 #else
356 # define ALLOC(P, SIZE) \
357 do { P = qemu_mallocz(SIZE); } while (0)
358 #endif
359
360 /* Level 1. Always allocated. */
361 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
362
363 /* Level 2..N-1. */
364 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
365 void **p = *lp;
366
367 if (p == NULL) {
368 if (!alloc) {
369 return NULL;
370 }
371 ALLOC(p, sizeof(void *) * L2_SIZE);
372 *lp = p;
373 }
374
375 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
376 }
377
378 pd = *lp;
379 if (pd == NULL) {
380 if (!alloc) {
381 return NULL;
382 }
383 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
384 *lp = pd;
385 }
386
387 #undef ALLOC
388
389 return pd + (index & (L2_SIZE - 1));
390 }
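/* Example walk (assuming V_L1_SHIFT == 20 and L2_BITS == 10): for page
   index 0x12345, page_find_alloc() selects L1 slot (0x12345 >> 20) & (V_L1_SIZE - 1),
   then uses bits 10..19 (0x48) to pick the intermediate table entry and
   bits 0..9 (0x345) to index the final PageDesc array. */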
391
392 static inline PageDesc *page_find(tb_page_addr_t index)
393 {
394 return page_find_alloc(index, 0);
395 }
396
397 #if !defined(CONFIG_USER_ONLY)
398 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
399 {
400 PhysPageDesc *pd;
401 void **lp;
402 int i;
403
404 /* Level 1. Always allocated. */
405 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
406
407 /* Level 2..N-1. */
408 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
409 void **p = *lp;
410 if (p == NULL) {
411 if (!alloc) {
412 return NULL;
413 }
414 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
415 }
416 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
417 }
418
419 pd = *lp;
420 if (pd == NULL) {
421 int i;
422
423 if (!alloc) {
424 return NULL;
425 }
426
427 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
428
429 for (i = 0; i < L2_SIZE; i++) {
430 pd[i].phys_offset = IO_MEM_UNASSIGNED;
431 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
432 }
433 }
434
435 return pd + (index & (L2_SIZE - 1));
436 }
437
438 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
439 {
440 return phys_page_find_alloc(index, 0);
441 }
442
443 static void tlb_protect_code(ram_addr_t ram_addr);
444 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
445 target_ulong vaddr);
446 #define mmap_lock() do { } while(0)
447 #define mmap_unlock() do { } while(0)
448 #endif
449
450 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
451
452 #if defined(CONFIG_USER_ONLY)
453 /* Currently it is not recommended to allocate big chunks of data in
454    user mode. This will change when a dedicated libc is used. */
455 #define USE_STATIC_CODE_GEN_BUFFER
456 #endif
457
458 #ifdef USE_STATIC_CODE_GEN_BUFFER
459 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
460 __attribute__((aligned (CODE_GEN_ALIGN)));
461 #endif
462
463 static void code_gen_alloc(unsigned long tb_size)
464 {
465 #ifdef USE_STATIC_CODE_GEN_BUFFER
466 code_gen_buffer = static_code_gen_buffer;
467 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
468 map_exec(code_gen_buffer, code_gen_buffer_size);
469 #else
470 code_gen_buffer_size = tb_size;
471 if (code_gen_buffer_size == 0) {
472 #if defined(CONFIG_USER_ONLY)
473 /* in user mode, phys_ram_size is not meaningful */
474 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
475 #else
476 /* XXX: needs adjustments */
477 code_gen_buffer_size = (unsigned long)(ram_size / 4);
478 #endif
479 }
480 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
481 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
482 /* The code gen buffer location may have constraints depending on
483 the host cpu and OS */
484 #if defined(__linux__)
485 {
486 int flags;
487 void *start = NULL;
488
489 flags = MAP_PRIVATE | MAP_ANONYMOUS;
490 #if defined(__x86_64__)
491 flags |= MAP_32BIT;
492 /* Cannot map more than that */
493 if (code_gen_buffer_size > (800 * 1024 * 1024))
494 code_gen_buffer_size = (800 * 1024 * 1024);
495 #elif defined(__sparc_v9__)
496 // Map the buffer below 2G, so we can use direct calls and branches
497 flags |= MAP_FIXED;
498 start = (void *) 0x60000000UL;
499 if (code_gen_buffer_size > (512 * 1024 * 1024))
500 code_gen_buffer_size = (512 * 1024 * 1024);
501 #elif defined(__arm__)
502 /* Map the buffer below 32M, so we can use direct calls and branches */
503 flags |= MAP_FIXED;
504 start = (void *) 0x01000000UL;
505 if (code_gen_buffer_size > 16 * 1024 * 1024)
506 code_gen_buffer_size = 16 * 1024 * 1024;
507 #elif defined(__s390x__)
508 /* Map the buffer so that we can use direct calls and branches. */
509 /* We have a +- 4GB range on the branches; leave some slop. */
510 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
511 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
512 }
513 start = (void *)0x90000000UL;
514 #endif
515 code_gen_buffer = mmap(start, code_gen_buffer_size,
516 PROT_WRITE | PROT_READ | PROT_EXEC,
517 flags, -1, 0);
518 if (code_gen_buffer == MAP_FAILED) {
519 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
520 exit(1);
521 }
522 }
523 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
524 || defined(__DragonFly__) || defined(__OpenBSD__)
525 {
526 int flags;
527 void *addr = NULL;
528 flags = MAP_PRIVATE | MAP_ANONYMOUS;
529 #if defined(__x86_64__)
530 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
531 * 0x40000000 is free */
532 flags |= MAP_FIXED;
533 addr = (void *)0x40000000;
534 /* Cannot map more than that */
535 if (code_gen_buffer_size > (800 * 1024 * 1024))
536 code_gen_buffer_size = (800 * 1024 * 1024);
537 #elif defined(__sparc_v9__)
538 // Map the buffer below 2G, so we can use direct calls and branches
539 flags |= MAP_FIXED;
540 addr = (void *) 0x60000000UL;
541 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
542 code_gen_buffer_size = (512 * 1024 * 1024);
543 }
544 #endif
545 code_gen_buffer = mmap(addr, code_gen_buffer_size,
546 PROT_WRITE | PROT_READ | PROT_EXEC,
547 flags, -1, 0);
548 if (code_gen_buffer == MAP_FAILED) {
549 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
550 exit(1);
551 }
552 }
553 #else
554 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
555 map_exec(code_gen_buffer, code_gen_buffer_size);
556 #endif
557 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
558 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
559 code_gen_buffer_max_size = code_gen_buffer_size -
560 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
561 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
562 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
563 }
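/* The TCG_MAX_OP_SIZE * OPC_MAX_SIZE bytes subtracted above leave slack at
   the end of the buffer: tb_alloc() refuses new blocks once code_gen_ptr
   crosses code_gen_buffer_max_size, so a block whose translation starts
   just below that threshold should still fit in the real buffer. */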
564
565 /* Must be called before using the QEMU cpus. 'tb_size' is the size
566 (in bytes) allocated to the translation buffer. Zero means default
567 size. */
568 void cpu_exec_init_all(unsigned long tb_size)
569 {
570 cpu_gen_init();
571 code_gen_alloc(tb_size);
572 code_gen_ptr = code_gen_buffer;
573 page_init();
574 #if !defined(CONFIG_USER_ONLY)
575 io_mem_init();
576 #endif
577 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
578 /* There's no guest base to take into account, so go ahead and
579 initialize the prologue now. */
580 tcg_prologue_init(&tcg_ctx);
581 #endif
582 }
583
584 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585
586 static int cpu_common_post_load(void *opaque, int version_id)
587 {
588 CPUState *env = opaque;
589
590 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
591 version_id is increased. */
592 env->interrupt_request &= ~0x01;
593 tlb_flush(env, 1);
594
595 return 0;
596 }
597
598 static const VMStateDescription vmstate_cpu_common = {
599 .name = "cpu_common",
600 .version_id = 1,
601 .minimum_version_id = 1,
602 .minimum_version_id_old = 1,
603 .post_load = cpu_common_post_load,
604 .fields = (VMStateField []) {
605 VMSTATE_UINT32(halted, CPUState),
606 VMSTATE_UINT32(interrupt_request, CPUState),
607 VMSTATE_END_OF_LIST()
608 }
609 };
610 #endif
611
612 CPUState *qemu_get_cpu(int cpu)
613 {
614 CPUState *env = first_cpu;
615
616 while (env) {
617 if (env->cpu_index == cpu)
618 break;
619 env = env->next_cpu;
620 }
621
622 return env;
623 }
624
625 void cpu_exec_init(CPUState *env)
626 {
627 CPUState **penv;
628 int cpu_index;
629
630 #if defined(CONFIG_USER_ONLY)
631 cpu_list_lock();
632 #endif
633 env->next_cpu = NULL;
634 penv = &first_cpu;
635 cpu_index = 0;
636 while (*penv != NULL) {
637 penv = &(*penv)->next_cpu;
638 cpu_index++;
639 }
640 env->cpu_index = cpu_index;
641 env->numa_node = 0;
642 QTAILQ_INIT(&env->breakpoints);
643 QTAILQ_INIT(&env->watchpoints);
644 #ifndef CONFIG_USER_ONLY
645 env->thread_id = qemu_get_thread_id();
646 #endif
647 *penv = env;
648 #if defined(CONFIG_USER_ONLY)
649 cpu_list_unlock();
650 #endif
651 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
652 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
653 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
654 cpu_save, cpu_load, env);
655 #endif
656 }
657
658 /* Allocate a new translation block. Flush the translation buffer if
659 too many translation blocks or too much generated code. */
660 static TranslationBlock *tb_alloc(target_ulong pc)
661 {
662 TranslationBlock *tb;
663
664 if (nb_tbs >= code_gen_max_blocks ||
665 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
666 return NULL;
667 tb = &tbs[nb_tbs++];
668 tb->pc = pc;
669 tb->cflags = 0;
670 return tb;
671 }
672
673 void tb_free(TranslationBlock *tb)
674 {
675     /* In practice this is mostly used for single-use temporary TBs.
676        Ignore the hard cases and just back up if this TB happens to
677        be the last one generated.  */
678 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
679 code_gen_ptr = tb->tc_ptr;
680 nb_tbs--;
681 }
682 }
683
684 static inline void invalidate_page_bitmap(PageDesc *p)
685 {
686 if (p->code_bitmap) {
687 qemu_free(p->code_bitmap);
688 p->code_bitmap = NULL;
689 }
690 p->code_write_count = 0;
691 }
692
693 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
694
695 static void page_flush_tb_1 (int level, void **lp)
696 {
697 int i;
698
699 if (*lp == NULL) {
700 return;
701 }
702 if (level == 0) {
703 PageDesc *pd = *lp;
704 for (i = 0; i < L2_SIZE; ++i) {
705 pd[i].first_tb = NULL;
706 invalidate_page_bitmap(pd + i);
707 }
708 } else {
709 void **pp = *lp;
710 for (i = 0; i < L2_SIZE; ++i) {
711 page_flush_tb_1 (level - 1, pp + i);
712 }
713 }
714 }
715
716 static void page_flush_tb(void)
717 {
718 int i;
719 for (i = 0; i < V_L1_SIZE; i++) {
720 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
721 }
722 }
723
724 /* flush all the translation blocks */
725 /* XXX: tb_flush is currently not thread safe */
726 void tb_flush(CPUState *env1)
727 {
728 CPUState *env;
729 #if defined(DEBUG_FLUSH)
730 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
731 (unsigned long)(code_gen_ptr - code_gen_buffer),
732 nb_tbs, nb_tbs > 0 ?
733 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
734 #endif
735 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
736 cpu_abort(env1, "Internal error: code buffer overflow\n");
737
738 nb_tbs = 0;
739
740 for(env = first_cpu; env != NULL; env = env->next_cpu) {
741 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
742 }
743
744 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
745 page_flush_tb();
746
747 code_gen_ptr = code_gen_buffer;
748 /* XXX: flush processor icache at this point if cache flush is
749 expensive */
750 tb_flush_count++;
751 }
752
753 #ifdef DEBUG_TB_CHECK
754
755 static void tb_invalidate_check(target_ulong address)
756 {
757 TranslationBlock *tb;
758 int i;
759 address &= TARGET_PAGE_MASK;
760 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
761 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
762 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
763 address >= tb->pc + tb->size)) {
764 printf("ERROR invalidate: address=" TARGET_FMT_lx
765 " PC=%08lx size=%04x\n",
766 address, (long)tb->pc, tb->size);
767 }
768 }
769 }
770 }
771
772 /* verify that all the pages have correct rights for code */
773 static void tb_page_check(void)
774 {
775 TranslationBlock *tb;
776 int i, flags1, flags2;
777
778 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
779 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
780 flags1 = page_get_flags(tb->pc);
781 flags2 = page_get_flags(tb->pc + tb->size - 1);
782 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
783 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
784 (long)tb->pc, tb->size, flags1, flags2);
785 }
786 }
787 }
788 }
789
790 #endif
791
792 /* invalidate one TB */
793 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
794 int next_offset)
795 {
796 TranslationBlock *tb1;
797 for(;;) {
798 tb1 = *ptb;
799 if (tb1 == tb) {
800 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
801 break;
802 }
803 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
804 }
805 }
806
807 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
808 {
809 TranslationBlock *tb1;
810 unsigned int n1;
811
812 for(;;) {
813 tb1 = *ptb;
814 n1 = (long)tb1 & 3;
815 tb1 = (TranslationBlock *)((long)tb1 & ~3);
816 if (tb1 == tb) {
817 *ptb = tb1->page_next[n1];
818 break;
819 }
820 ptb = &tb1->page_next[n1];
821 }
822 }
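/* The low two bits of the pointers stored in PageDesc.first_tb and in the
   page_next[] links encode which of the TB's (up to) two pages the link
   belongs to: 0 for page_addr[0], 1 for page_addr[1]. The jmp_first /
   jmp_next lists use the same tagging, with the value 2 marking the head
   of the circular list (see tb_link_page() below). */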
823
824 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
825 {
826 TranslationBlock *tb1, **ptb;
827 unsigned int n1;
828
829 ptb = &tb->jmp_next[n];
830 tb1 = *ptb;
831 if (tb1) {
832 /* find tb(n) in circular list */
833 for(;;) {
834 tb1 = *ptb;
835 n1 = (long)tb1 & 3;
836 tb1 = (TranslationBlock *)((long)tb1 & ~3);
837 if (n1 == n && tb1 == tb)
838 break;
839 if (n1 == 2) {
840 ptb = &tb1->jmp_first;
841 } else {
842 ptb = &tb1->jmp_next[n1];
843 }
844 }
845         /* now we can remove tb(n) from the list */
846 *ptb = tb->jmp_next[n];
847
848 tb->jmp_next[n] = NULL;
849 }
850 }
851
852 /* reset the jump entry 'n' of a TB so that it is not chained to
853 another TB */
854 static inline void tb_reset_jump(TranslationBlock *tb, int n)
855 {
856 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
857 }
858
859 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
860 {
861 CPUState *env;
862 PageDesc *p;
863 unsigned int h, n1;
864 tb_page_addr_t phys_pc;
865 TranslationBlock *tb1, *tb2;
866
867 /* remove the TB from the hash list */
868 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
869 h = tb_phys_hash_func(phys_pc);
870 tb_remove(&tb_phys_hash[h], tb,
871 offsetof(TranslationBlock, phys_hash_next));
872
873 /* remove the TB from the page list */
874 if (tb->page_addr[0] != page_addr) {
875 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
876 tb_page_remove(&p->first_tb, tb);
877 invalidate_page_bitmap(p);
878 }
879 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
880 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
881 tb_page_remove(&p->first_tb, tb);
882 invalidate_page_bitmap(p);
883 }
884
885 tb_invalidated_flag = 1;
886
887     /* remove the TB from each CPU's tb_jmp_cache */
888 h = tb_jmp_cache_hash_func(tb->pc);
889 for(env = first_cpu; env != NULL; env = env->next_cpu) {
890 if (env->tb_jmp_cache[h] == tb)
891 env->tb_jmp_cache[h] = NULL;
892 }
893
894     /* remove this TB from the two jump lists */
895 tb_jmp_remove(tb, 0);
896 tb_jmp_remove(tb, 1);
897
898     /* remove any remaining jumps to this TB */
899 tb1 = tb->jmp_first;
900 for(;;) {
901 n1 = (long)tb1 & 3;
902 if (n1 == 2)
903 break;
904 tb1 = (TranslationBlock *)((long)tb1 & ~3);
905 tb2 = tb1->jmp_next[n1];
906 tb_reset_jump(tb1, n1);
907 tb1->jmp_next[n1] = NULL;
908 tb1 = tb2;
909 }
910 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
911
912 tb_phys_invalidate_count++;
913 }
914
915 static inline void set_bits(uint8_t *tab, int start, int len)
916 {
917 int end, mask, end1;
918
919 end = start + len;
920 tab += start >> 3;
921 mask = 0xff << (start & 7);
922 if ((start & ~7) == (end & ~7)) {
923 if (start < end) {
924 mask &= ~(0xff << (end & 7));
925 *tab |= mask;
926 }
927 } else {
928 *tab++ |= mask;
929 start = (start + 8) & ~7;
930 end1 = end & ~7;
931 while (start < end1) {
932 *tab++ = 0xff;
933 start += 8;
934 }
935 if (start < end) {
936 mask = ~(0xff << (end & 7));
937 *tab |= mask;
938 }
939 }
940 }
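/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. bits 3-7 of tab[0]
   and bits 0-1 of tab[1]. */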
941
942 static void build_page_bitmap(PageDesc *p)
943 {
944 int n, tb_start, tb_end;
945 TranslationBlock *tb;
946
947 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
948
949 tb = p->first_tb;
950 while (tb != NULL) {
951 n = (long)tb & 3;
952 tb = (TranslationBlock *)((long)tb & ~3);
953 /* NOTE: this is subtle as a TB may span two physical pages */
954 if (n == 0) {
955 /* NOTE: tb_end may be after the end of the page, but
956 it is not a problem */
957 tb_start = tb->pc & ~TARGET_PAGE_MASK;
958 tb_end = tb_start + tb->size;
959 if (tb_end > TARGET_PAGE_SIZE)
960 tb_end = TARGET_PAGE_SIZE;
961 } else {
962 tb_start = 0;
963 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
964 }
965 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
966 tb = tb->page_next[n];
967 }
968 }
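/* Each bit of code_bitmap covers one byte of the guest page; a set bit
   means that byte falls inside at least one translated block, so a write
   to it must trigger invalidation (see tb_invalidate_phys_page_fast()
   below). */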
969
970 TranslationBlock *tb_gen_code(CPUState *env,
971 target_ulong pc, target_ulong cs_base,
972 int flags, int cflags)
973 {
974 TranslationBlock *tb;
975 uint8_t *tc_ptr;
976 tb_page_addr_t phys_pc, phys_page2;
977 target_ulong virt_page2;
978 int code_gen_size;
979
980 phys_pc = get_page_addr_code(env, pc);
981 tb = tb_alloc(pc);
982 if (!tb) {
983 /* flush must be done */
984 tb_flush(env);
985 /* cannot fail at this point */
986 tb = tb_alloc(pc);
987 /* Don't forget to invalidate previous TB info. */
988 tb_invalidated_flag = 1;
989 }
990 tc_ptr = code_gen_ptr;
991 tb->tc_ptr = tc_ptr;
992 tb->cs_base = cs_base;
993 tb->flags = flags;
994 tb->cflags = cflags;
995 cpu_gen_code(env, tb, &code_gen_size);
996 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
997
998 /* check next page if needed */
999 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1000 phys_page2 = -1;
1001 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1002 phys_page2 = get_page_addr_code(env, virt_page2);
1003 }
1004 tb_link_page(tb, phys_pc, phys_page2);
1005 return tb;
1006 }
1007
1008 /* invalidate all TBs which intersect with the target physical page
1009    starting in range [start, end). NOTE: start and end must refer to
1010 the same physical page. 'is_cpu_write_access' should be true if called
1011 from a real cpu write access: the virtual CPU will exit the current
1012 TB if code is modified inside this TB. */
1013 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1014 int is_cpu_write_access)
1015 {
1016 TranslationBlock *tb, *tb_next, *saved_tb;
1017 CPUState *env = cpu_single_env;
1018 tb_page_addr_t tb_start, tb_end;
1019 PageDesc *p;
1020 int n;
1021 #ifdef TARGET_HAS_PRECISE_SMC
1022 int current_tb_not_found = is_cpu_write_access;
1023 TranslationBlock *current_tb = NULL;
1024 int current_tb_modified = 0;
1025 target_ulong current_pc = 0;
1026 target_ulong current_cs_base = 0;
1027 int current_flags = 0;
1028 #endif /* TARGET_HAS_PRECISE_SMC */
1029
1030 p = page_find(start >> TARGET_PAGE_BITS);
1031 if (!p)
1032 return;
1033 if (!p->code_bitmap &&
1034 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1035 is_cpu_write_access) {
1036 /* build code bitmap */
1037 build_page_bitmap(p);
1038 }
1039
1040     /* we remove all the TBs in the range [start, end) */
1041 /* XXX: see if in some cases it could be faster to invalidate all the code */
1042 tb = p->first_tb;
1043 while (tb != NULL) {
1044 n = (long)tb & 3;
1045 tb = (TranslationBlock *)((long)tb & ~3);
1046 tb_next = tb->page_next[n];
1047 /* NOTE: this is subtle as a TB may span two physical pages */
1048 if (n == 0) {
1049 /* NOTE: tb_end may be after the end of the page, but
1050 it is not a problem */
1051 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1052 tb_end = tb_start + tb->size;
1053 } else {
1054 tb_start = tb->page_addr[1];
1055 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1056 }
1057 if (!(tb_end <= start || tb_start >= end)) {
1058 #ifdef TARGET_HAS_PRECISE_SMC
1059 if (current_tb_not_found) {
1060 current_tb_not_found = 0;
1061 current_tb = NULL;
1062 if (env->mem_io_pc) {
1063 /* now we have a real cpu fault */
1064 current_tb = tb_find_pc(env->mem_io_pc);
1065 }
1066 }
1067 if (current_tb == tb &&
1068 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1069 /* If we are modifying the current TB, we must stop
1070 its execution. We could be more precise by checking
1071 that the modification is after the current PC, but it
1072 would require a specialized function to partially
1073 restore the CPU state */
1074
1075 current_tb_modified = 1;
1076 cpu_restore_state(current_tb, env, env->mem_io_pc);
1077 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1078 &current_flags);
1079 }
1080 #endif /* TARGET_HAS_PRECISE_SMC */
1081 /* we need to do that to handle the case where a signal
1082 occurs while doing tb_phys_invalidate() */
1083 saved_tb = NULL;
1084 if (env) {
1085 saved_tb = env->current_tb;
1086 env->current_tb = NULL;
1087 }
1088 tb_phys_invalidate(tb, -1);
1089 if (env) {
1090 env->current_tb = saved_tb;
1091 if (env->interrupt_request && env->current_tb)
1092 cpu_interrupt(env, env->interrupt_request);
1093 }
1094 }
1095 tb = tb_next;
1096 }
1097 #if !defined(CONFIG_USER_ONLY)
1098 /* if no code remaining, no need to continue to use slow writes */
1099 if (!p->first_tb) {
1100 invalidate_page_bitmap(p);
1101 if (is_cpu_write_access) {
1102 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1103 }
1104 }
1105 #endif
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1110 itself */
1111 env->current_tb = NULL;
1112 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1113 cpu_resume_from_signal(env, NULL);
1114 }
1115 #endif
1116 }
1117
1118 /* len must be <= 8 and start must be a multiple of len */
1119 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1120 {
1121 PageDesc *p;
1122 int offset, b;
1123 #if 0
1124 if (1) {
1125 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1126 cpu_single_env->mem_io_vaddr, len,
1127 cpu_single_env->eip,
1128 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1129 }
1130 #endif
1131 p = page_find(start >> TARGET_PAGE_BITS);
1132 if (!p)
1133 return;
1134 if (p->code_bitmap) {
1135 offset = start & ~TARGET_PAGE_MASK;
1136 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1137 if (b & ((1 << len) - 1))
1138 goto do_invalidate;
1139 } else {
1140 do_invalidate:
1141 tb_invalidate_phys_page_range(start, start + len, 1);
1142 }
1143 }
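/* Example: for a 4-byte write at page offset 0x10, the bitmap check above
   inspects bits 16..19 of code_bitmap (code_bitmap[2] >> 0); only if one
   of them is set is the slower tb_invalidate_phys_page_range() path taken. */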
1144
1145 #if !defined(CONFIG_SOFTMMU)
1146 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1147 unsigned long pc, void *puc)
1148 {
1149 TranslationBlock *tb;
1150 PageDesc *p;
1151 int n;
1152 #ifdef TARGET_HAS_PRECISE_SMC
1153 TranslationBlock *current_tb = NULL;
1154 CPUState *env = cpu_single_env;
1155 int current_tb_modified = 0;
1156 target_ulong current_pc = 0;
1157 target_ulong current_cs_base = 0;
1158 int current_flags = 0;
1159 #endif
1160
1161 addr &= TARGET_PAGE_MASK;
1162 p = page_find(addr >> TARGET_PAGE_BITS);
1163 if (!p)
1164 return;
1165 tb = p->first_tb;
1166 #ifdef TARGET_HAS_PRECISE_SMC
1167 if (tb && pc != 0) {
1168 current_tb = tb_find_pc(pc);
1169 }
1170 #endif
1171 while (tb != NULL) {
1172 n = (long)tb & 3;
1173 tb = (TranslationBlock *)((long)tb & ~3);
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb == tb &&
1176 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1177 /* If we are modifying the current TB, we must stop
1178 its execution. We could be more precise by checking
1179 that the modification is after the current PC, but it
1180 would require a specialized function to partially
1181 restore the CPU state */
1182
1183 current_tb_modified = 1;
1184 cpu_restore_state(current_tb, env, pc);
1185 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1186 &current_flags);
1187 }
1188 #endif /* TARGET_HAS_PRECISE_SMC */
1189 tb_phys_invalidate(tb, addr);
1190 tb = tb->page_next[n];
1191 }
1192 p->first_tb = NULL;
1193 #ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_modified) {
1195 /* we generate a block containing just the instruction
1196 modifying the memory. It will ensure that it cannot modify
1197 itself */
1198 env->current_tb = NULL;
1199 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1200 cpu_resume_from_signal(env, puc);
1201 }
1202 #endif
1203 }
1204 #endif
1205
1206 /* add the tb to the target page and protect it if necessary */
1207 static inline void tb_alloc_page(TranslationBlock *tb,
1208 unsigned int n, tb_page_addr_t page_addr)
1209 {
1210 PageDesc *p;
1211 #ifndef CONFIG_USER_ONLY
1212 bool page_already_protected;
1213 #endif
1214
1215 tb->page_addr[n] = page_addr;
1216 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1217 tb->page_next[n] = p->first_tb;
1218 #ifndef CONFIG_USER_ONLY
1219 page_already_protected = p->first_tb != NULL;
1220 #endif
1221 p->first_tb = (TranslationBlock *)((long)tb | n);
1222 invalidate_page_bitmap(p);
1223
1224 #if defined(TARGET_HAS_SMC) || 1
1225
1226 #if defined(CONFIG_USER_ONLY)
1227 if (p->flags & PAGE_WRITE) {
1228 target_ulong addr;
1229 PageDesc *p2;
1230 int prot;
1231
1232 /* force the host page as non writable (writes will have a
1233 page fault + mprotect overhead) */
1234 page_addr &= qemu_host_page_mask;
1235 prot = 0;
1236 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1237 addr += TARGET_PAGE_SIZE) {
1238
1239 p2 = page_find (addr >> TARGET_PAGE_BITS);
1240 if (!p2)
1241 continue;
1242 prot |= p2->flags;
1243 p2->flags &= ~PAGE_WRITE;
1244 }
1245 mprotect(g2h(page_addr), qemu_host_page_size,
1246 (prot & PAGE_BITS) & ~PAGE_WRITE);
1247 #ifdef DEBUG_TB_INVALIDATE
1248 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1249 page_addr);
1250 #endif
1251 }
1252 #else
1253 /* if some code is already present, then the pages are already
1254 protected. So we handle the case where only the first TB is
1255 allocated in a physical page */
1256 if (!page_already_protected) {
1257 tlb_protect_code(page_addr);
1258 }
1259 #endif
1260
1261 #endif /* TARGET_HAS_SMC */
1262 }
1263
1264 /* add a new TB and link it to the physical page tables. phys_page2 is
1265 (-1) to indicate that only one page contains the TB. */
1266 void tb_link_page(TranslationBlock *tb,
1267 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1268 {
1269 unsigned int h;
1270 TranslationBlock **ptb;
1271
1272 /* Grab the mmap lock to stop another thread invalidating this TB
1273 before we are done. */
1274 mmap_lock();
1275 /* add in the physical hash table */
1276 h = tb_phys_hash_func(phys_pc);
1277 ptb = &tb_phys_hash[h];
1278 tb->phys_hash_next = *ptb;
1279 *ptb = tb;
1280
1281 /* add in the page list */
1282 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1283 if (phys_page2 != -1)
1284 tb_alloc_page(tb, 1, phys_page2);
1285 else
1286 tb->page_addr[1] = -1;
1287
1288 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1289 tb->jmp_next[0] = NULL;
1290 tb->jmp_next[1] = NULL;
1291
1292 /* init original jump addresses */
1293 if (tb->tb_next_offset[0] != 0xffff)
1294 tb_reset_jump(tb, 0);
1295 if (tb->tb_next_offset[1] != 0xffff)
1296 tb_reset_jump(tb, 1);
1297
1298 #ifdef DEBUG_TB_CHECK
1299 tb_page_check();
1300 #endif
1301 mmap_unlock();
1302 }
1303
1304 /* find the TB containing the host code address tc_ptr, i.e. the TB 'tb'
1305    such that tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr. Return NULL if not found */
1306 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1307 {
1308 int m_min, m_max, m;
1309 unsigned long v;
1310 TranslationBlock *tb;
1311
1312 if (nb_tbs <= 0)
1313 return NULL;
1314 if (tc_ptr < (unsigned long)code_gen_buffer ||
1315 tc_ptr >= (unsigned long)code_gen_ptr)
1316 return NULL;
1317 /* binary search (cf Knuth) */
1318 m_min = 0;
1319 m_max = nb_tbs - 1;
1320 while (m_min <= m_max) {
1321 m = (m_min + m_max) >> 1;
1322 tb = &tbs[m];
1323 v = (unsigned long)tb->tc_ptr;
1324 if (v == tc_ptr)
1325 return tb;
1326 else if (tc_ptr < v) {
1327 m_max = m - 1;
1328 } else {
1329 m_min = m + 1;
1330 }
1331 }
1332 return &tbs[m_max];
1333 }
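/* tbs[] is filled in allocation order, which matches the layout of the
   generated code in code_gen_buffer, so the binary search above returns
   the TB with the largest tc_ptr that is <= the given host PC. */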
1334
1335 static void tb_reset_jump_recursive(TranslationBlock *tb);
1336
1337 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1338 {
1339 TranslationBlock *tb1, *tb_next, **ptb;
1340 unsigned int n1;
1341
1342 tb1 = tb->jmp_next[n];
1343 if (tb1 != NULL) {
1344 /* find head of list */
1345 for(;;) {
1346 n1 = (long)tb1 & 3;
1347 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1348 if (n1 == 2)
1349 break;
1350 tb1 = tb1->jmp_next[n1];
1351 }
1352         /* we are now sure that tb jumps to tb1 */
1353 tb_next = tb1;
1354
1355 /* remove tb from the jmp_first list */
1356 ptb = &tb_next->jmp_first;
1357 for(;;) {
1358 tb1 = *ptb;
1359 n1 = (long)tb1 & 3;
1360 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1361 if (n1 == n && tb1 == tb)
1362 break;
1363 ptb = &tb1->jmp_next[n1];
1364 }
1365 *ptb = tb->jmp_next[n];
1366 tb->jmp_next[n] = NULL;
1367
1368         /* remove the jump to the next tb in the generated code */
1369 tb_reset_jump(tb, n);
1370
1371         /* remove jumps from the tb we could have jumped to */
1372 tb_reset_jump_recursive(tb_next);
1373 }
1374 }
1375
1376 static void tb_reset_jump_recursive(TranslationBlock *tb)
1377 {
1378 tb_reset_jump_recursive2(tb, 0);
1379 tb_reset_jump_recursive2(tb, 1);
1380 }
1381
1382 #if defined(TARGET_HAS_ICE)
1383 #if defined(CONFIG_USER_ONLY)
1384 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1385 {
1386 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1387 }
1388 #else
1389 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1390 {
1391 target_phys_addr_t addr;
1392 target_ulong pd;
1393 ram_addr_t ram_addr;
1394 PhysPageDesc *p;
1395
1396 addr = cpu_get_phys_page_debug(env, pc);
1397 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1398 if (!p) {
1399 pd = IO_MEM_UNASSIGNED;
1400 } else {
1401 pd = p->phys_offset;
1402 }
1403 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1404 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1405 }
1406 #endif
1407 #endif /* TARGET_HAS_ICE */
1408
1409 #if defined(CONFIG_USER_ONLY)
1410 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1411
1412 {
1413 }
1414
1415 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1416 int flags, CPUWatchpoint **watchpoint)
1417 {
1418 return -ENOSYS;
1419 }
1420 #else
1421 /* Add a watchpoint. */
1422 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1423 int flags, CPUWatchpoint **watchpoint)
1424 {
1425 target_ulong len_mask = ~(len - 1);
1426 CPUWatchpoint *wp;
1427
1428 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1429 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1430 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1431 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1432 return -EINVAL;
1433 }
1434 wp = qemu_malloc(sizeof(*wp));
1435
1436 wp->vaddr = addr;
1437 wp->len_mask = len_mask;
1438 wp->flags = flags;
1439
1440 /* keep all GDB-injected watchpoints in front */
1441 if (flags & BP_GDB)
1442 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1443 else
1444 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1445
1446 tlb_flush_page(env, addr);
1447
1448 if (watchpoint)
1449 *watchpoint = wp;
1450 return 0;
1451 }
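/* Example (illustrative): an 8-byte watchpoint at an 8-byte-aligned address
   gives len_mask == ~7, so the watchpoint covers addr..addr+7; the hit test
   against len_mask is performed later by the watchpoint checking code on
   the memory access slow path. */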
1452
1453 /* Remove a specific watchpoint. */
1454 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1455 int flags)
1456 {
1457 target_ulong len_mask = ~(len - 1);
1458 CPUWatchpoint *wp;
1459
1460 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1461 if (addr == wp->vaddr && len_mask == wp->len_mask
1462 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1463 cpu_watchpoint_remove_by_ref(env, wp);
1464 return 0;
1465 }
1466 }
1467 return -ENOENT;
1468 }
1469
1470 /* Remove a specific watchpoint by reference. */
1471 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1472 {
1473 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1474
1475 tlb_flush_page(env, watchpoint->vaddr);
1476
1477 qemu_free(watchpoint);
1478 }
1479
1480 /* Remove all matching watchpoints. */
1481 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1482 {
1483 CPUWatchpoint *wp, *next;
1484
1485 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1486 if (wp->flags & mask)
1487 cpu_watchpoint_remove_by_ref(env, wp);
1488 }
1489 }
1490 #endif
1491
1492 /* Add a breakpoint. */
1493 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1494 CPUBreakpoint **breakpoint)
1495 {
1496 #if defined(TARGET_HAS_ICE)
1497 CPUBreakpoint *bp;
1498
1499 bp = qemu_malloc(sizeof(*bp));
1500
1501 bp->pc = pc;
1502 bp->flags = flags;
1503
1504 /* keep all GDB-injected breakpoints in front */
1505 if (flags & BP_GDB)
1506 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1507 else
1508 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1509
1510 breakpoint_invalidate(env, pc);
1511
1512 if (breakpoint)
1513 *breakpoint = bp;
1514 return 0;
1515 #else
1516 return -ENOSYS;
1517 #endif
1518 }
1519
1520 /* Remove a specific breakpoint. */
1521 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1522 {
1523 #if defined(TARGET_HAS_ICE)
1524 CPUBreakpoint *bp;
1525
1526 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1527 if (bp->pc == pc && bp->flags == flags) {
1528 cpu_breakpoint_remove_by_ref(env, bp);
1529 return 0;
1530 }
1531 }
1532 return -ENOENT;
1533 #else
1534 return -ENOSYS;
1535 #endif
1536 }
1537
1538 /* Remove a specific breakpoint by reference. */
1539 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1540 {
1541 #if defined(TARGET_HAS_ICE)
1542 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1543
1544 breakpoint_invalidate(env, breakpoint->pc);
1545
1546 qemu_free(breakpoint);
1547 #endif
1548 }
1549
1550 /* Remove all matching breakpoints. */
1551 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1552 {
1553 #if defined(TARGET_HAS_ICE)
1554 CPUBreakpoint *bp, *next;
1555
1556 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1557 if (bp->flags & mask)
1558 cpu_breakpoint_remove_by_ref(env, bp);
1559 }
1560 #endif
1561 }
1562
1563 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1564 CPU loop after each instruction */
1565 void cpu_single_step(CPUState *env, int enabled)
1566 {
1567 #if defined(TARGET_HAS_ICE)
1568 if (env->singlestep_enabled != enabled) {
1569 env->singlestep_enabled = enabled;
1570 if (kvm_enabled())
1571 kvm_update_guest_debug(env, 0);
1572 else {
1573 /* must flush all the translated code to avoid inconsistencies */
1574 /* XXX: only flush what is necessary */
1575 tb_flush(env);
1576 }
1577 }
1578 #endif
1579 }
1580
1581 /* enable or disable low-level logging */
1582 void cpu_set_log(int log_flags)
1583 {
1584 loglevel = log_flags;
1585 if (loglevel && !logfile) {
1586 logfile = fopen(logfilename, log_append ? "a" : "w");
1587 if (!logfile) {
1588 perror(logfilename);
1589 _exit(1);
1590 }
1591 #if !defined(CONFIG_SOFTMMU)
1592 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1593 {
1594 static char logfile_buf[4096];
1595 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1596 }
1597 #elif !defined(_WIN32)
1598 /* Win32 doesn't support line-buffering and requires size >= 2 */
1599 setvbuf(logfile, NULL, _IOLBF, 0);
1600 #endif
1601 log_append = 1;
1602 }
1603 if (!loglevel && logfile) {
1604 fclose(logfile);
1605 logfile = NULL;
1606 }
1607 }
1608
1609 void cpu_set_log_filename(const char *filename)
1610 {
1611 logfilename = strdup(filename);
1612 if (logfile) {
1613 fclose(logfile);
1614 logfile = NULL;
1615 }
1616 cpu_set_log(loglevel);
1617 }
1618
1619 static void cpu_unlink_tb(CPUState *env)
1620 {
1621 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1622 problem and hope the cpu will stop of its own accord. For userspace
1623 emulation this often isn't actually as bad as it sounds. Often
1624 signals are used primarily to interrupt blocking syscalls. */
1625 TranslationBlock *tb;
1626 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1627
1628 spin_lock(&interrupt_lock);
1629 tb = env->current_tb;
1630     /* if the cpu is currently executing code, we must unlink it and
1631        all the potentially executing TBs */
1632 if (tb) {
1633 env->current_tb = NULL;
1634 tb_reset_jump_recursive(tb);
1635 }
1636 spin_unlock(&interrupt_lock);
1637 }
1638
1639 #ifndef CONFIG_USER_ONLY
1640 /* mask must never be zero, except for A20 change call */
1641 static void tcg_handle_interrupt(CPUState *env, int mask)
1642 {
1643 int old_mask;
1644
1645 old_mask = env->interrupt_request;
1646 env->interrupt_request |= mask;
1647
1648 /*
1649 * If called from iothread context, wake the target cpu in
1650      * case it is halted.
1651 */
1652 if (!qemu_cpu_is_self(env)) {
1653 qemu_cpu_kick(env);
1654 return;
1655 }
1656
1657 if (use_icount) {
1658 env->icount_decr.u16.high = 0xffff;
1659 if (!can_do_io(env)
1660 && (mask & ~old_mask) != 0) {
1661 cpu_abort(env, "Raised interrupt while not in I/O function");
1662 }
1663 } else {
1664 cpu_unlink_tb(env);
1665 }
1666 }
1667
1668 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1669
1670 #else /* CONFIG_USER_ONLY */
1671
1672 void cpu_interrupt(CPUState *env, int mask)
1673 {
1674 env->interrupt_request |= mask;
1675 cpu_unlink_tb(env);
1676 }
1677 #endif /* CONFIG_USER_ONLY */
1678
1679 void cpu_reset_interrupt(CPUState *env, int mask)
1680 {
1681 env->interrupt_request &= ~mask;
1682 }
1683
1684 void cpu_exit(CPUState *env)
1685 {
1686 env->exit_request = 1;
1687 cpu_unlink_tb(env);
1688 }
1689
1690 const CPULogItem cpu_log_items[] = {
1691 { CPU_LOG_TB_OUT_ASM, "out_asm",
1692 "show generated host assembly code for each compiled TB" },
1693 { CPU_LOG_TB_IN_ASM, "in_asm",
1694 "show target assembly code for each compiled TB" },
1695 { CPU_LOG_TB_OP, "op",
1696 "show micro ops for each compiled TB" },
1697 { CPU_LOG_TB_OP_OPT, "op_opt",
1698 "show micro ops "
1699 #ifdef TARGET_I386
1700 "before eflags optimization and "
1701 #endif
1702 "after liveness analysis" },
1703 { CPU_LOG_INT, "int",
1704 "show interrupts/exceptions in short format" },
1705 { CPU_LOG_EXEC, "exec",
1706 "show trace before each executed TB (lots of logs)" },
1707 { CPU_LOG_TB_CPU, "cpu",
1708 "show CPU state before block translation" },
1709 #ifdef TARGET_I386
1710 { CPU_LOG_PCALL, "pcall",
1711 "show protected mode far calls/returns/exceptions" },
1712 { CPU_LOG_RESET, "cpu_reset",
1713 "show CPU state before CPU resets" },
1714 #endif
1715 #ifdef DEBUG_IOPORT
1716 { CPU_LOG_IOPORT, "ioport",
1717 "show all i/o ports accesses" },
1718 #endif
1719 { 0, NULL, NULL },
1720 };
1721
1722 #ifndef CONFIG_USER_ONLY
1723 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1724 = QLIST_HEAD_INITIALIZER(memory_client_list);
1725
1726 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1727 ram_addr_t size,
1728 ram_addr_t phys_offset,
1729 bool log_dirty)
1730 {
1731 CPUPhysMemoryClient *client;
1732 QLIST_FOREACH(client, &memory_client_list, list) {
1733 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1734 }
1735 }
1736
1737 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1738 target_phys_addr_t end)
1739 {
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
1742 int r = client->sync_dirty_bitmap(client, start, end);
1743 if (r < 0)
1744 return r;
1745 }
1746 return 0;
1747 }
1748
1749 static int cpu_notify_migration_log(int enable)
1750 {
1751 CPUPhysMemoryClient *client;
1752 QLIST_FOREACH(client, &memory_client_list, list) {
1753 int r = client->migration_log(client, enable);
1754 if (r < 0)
1755 return r;
1756 }
1757 return 0;
1758 }
1759
1760 struct last_map {
1761 target_phys_addr_t start_addr;
1762 ram_addr_t size;
1763 ram_addr_t phys_offset;
1764 };
1765
1766 /* The l1_phys_map provides the upper P_L1_BITS of the guest physical
1767  * address. Each intermediate table provides the next L2_BITS of the guest
1768  * physical address space. The number of levels varies based on host and
1769  * guest configuration, making it efficient to build the final guest
1770  * physical address by seeding the L1 offset and shifting and adding in
1771  * each L2 offset as we recurse through them. */
1772 static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1773 void **lp, target_phys_addr_t addr,
1774 struct last_map *map)
1775 {
1776 int i;
1777
1778 if (*lp == NULL) {
1779 return;
1780 }
1781 if (level == 0) {
1782 PhysPageDesc *pd = *lp;
1783 addr <<= L2_BITS + TARGET_PAGE_BITS;
1784 for (i = 0; i < L2_SIZE; ++i) {
1785 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1786 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1787
1788 if (map->size &&
1789 start_addr == map->start_addr + map->size &&
1790 pd[i].phys_offset == map->phys_offset + map->size) {
1791
1792 map->size += TARGET_PAGE_SIZE;
1793 continue;
1794 } else if (map->size) {
1795 client->set_memory(client, map->start_addr,
1796 map->size, map->phys_offset, false);
1797 }
1798
1799 map->start_addr = start_addr;
1800 map->size = TARGET_PAGE_SIZE;
1801 map->phys_offset = pd[i].phys_offset;
1802 }
1803 }
1804 } else {
1805 void **pp = *lp;
1806 for (i = 0; i < L2_SIZE; ++i) {
1807 phys_page_for_each_1(client, level - 1, pp + i,
1808 (addr << L2_BITS) | i, map);
1809 }
1810 }
1811 }
1812
1813 static void phys_page_for_each(CPUPhysMemoryClient *client)
1814 {
1815 int i;
1816 struct last_map map = { };
1817
1818 for (i = 0; i < P_L1_SIZE; ++i) {
1819 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1820 l1_phys_map + i, i, &map);
1821 }
1822 if (map.size) {
1823 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1824 false);
1825 }
1826 }
1827
1828 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1829 {
1830 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1831 phys_page_for_each(client);
1832 }
1833
1834 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1835 {
1836 QLIST_REMOVE(client, list);
1837 }
1838 #endif
1839
1840 static int cmp1(const char *s1, int n, const char *s2)
1841 {
1842 if (strlen(s2) != n)
1843 return 0;
1844 return memcmp(s1, s2, n) == 0;
1845 }
1846
1847 /* takes a comma-separated list of log masks. Returns 0 on error. */
1848 int cpu_str_to_log_mask(const char *str)
1849 {
1850 const CPULogItem *item;
1851 int mask;
1852 const char *p, *p1;
1853
1854 p = str;
1855 mask = 0;
1856 for(;;) {
1857 p1 = strchr(p, ',');
1858 if (!p1)
1859 p1 = p + strlen(p);
1860 if(cmp1(p,p1-p,"all")) {
1861 for(item = cpu_log_items; item->mask != 0; item++) {
1862 mask |= item->mask;
1863 }
1864 } else {
1865 for(item = cpu_log_items; item->mask != 0; item++) {
1866 if (cmp1(p, p1 - p, item->name))
1867 goto found;
1868 }
1869 return 0;
1870 }
1871 found:
1872 mask |= item->mask;
1873 if (*p1 != ',')
1874 break;
1875 p = p1 + 1;
1876 }
1877 return mask;
1878 }
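/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while "all" ORs together every mask
   in cpu_log_items[]. */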
1879
1880 void cpu_abort(CPUState *env, const char *fmt, ...)
1881 {
1882 va_list ap;
1883 va_list ap2;
1884
1885 va_start(ap, fmt);
1886 va_copy(ap2, ap);
1887 fprintf(stderr, "qemu: fatal: ");
1888 vfprintf(stderr, fmt, ap);
1889 fprintf(stderr, "\n");
1890 #ifdef TARGET_I386
1891 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1892 #else
1893 cpu_dump_state(env, stderr, fprintf, 0);
1894 #endif
1895 if (qemu_log_enabled()) {
1896 qemu_log("qemu: fatal: ");
1897 qemu_log_vprintf(fmt, ap2);
1898 qemu_log("\n");
1899 #ifdef TARGET_I386
1900 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1901 #else
1902 log_cpu_state(env, 0);
1903 #endif
1904 qemu_log_flush();
1905 qemu_log_close();
1906 }
1907 va_end(ap2);
1908 va_end(ap);
1909 #if defined(CONFIG_USER_ONLY)
1910 {
1911 struct sigaction act;
1912 sigfillset(&act.sa_mask);
1913 act.sa_handler = SIG_DFL;
1914 sigaction(SIGABRT, &act, NULL);
1915 }
1916 #endif
1917 abort();
1918 }
1919
1920 CPUState *cpu_copy(CPUState *env)
1921 {
1922 CPUState *new_env = cpu_init(env->cpu_model_str);
1923 CPUState *next_cpu = new_env->next_cpu;
1924 int cpu_index = new_env->cpu_index;
1925 #if defined(TARGET_HAS_ICE)
1926 CPUBreakpoint *bp;
1927 CPUWatchpoint *wp;
1928 #endif
1929
1930 memcpy(new_env, env, sizeof(CPUState));
1931
1932 /* Preserve chaining and index. */
1933 new_env->next_cpu = next_cpu;
1934 new_env->cpu_index = cpu_index;
1935
1936 /* Clone all break/watchpoints.
1937 Note: Once we support ptrace with hw-debug register access, make sure
1938 BP_CPU break/watchpoints are handled correctly on clone. */
1939 QTAILQ_INIT(&env->breakpoints);
1940 QTAILQ_INIT(&env->watchpoints);
1941 #if defined(TARGET_HAS_ICE)
1942 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1943 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1944 }
1945 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1946 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1947 wp->flags, NULL);
1948 }
1949 #endif
1950
1951 return new_env;
1952 }
1953
1954 #if !defined(CONFIG_USER_ONLY)
1955
1956 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1957 {
1958 unsigned int i;
1959
1960 /* Discard jump cache entries for any tb which might potentially
1961 overlap the flushed page. */
1962 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1963 memset (&env->tb_jmp_cache[i], 0,
1964 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1965
1966 i = tb_jmp_cache_hash_page(addr);
1967 memset (&env->tb_jmp_cache[i], 0,
1968 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1969 }
1970
1971 static CPUTLBEntry s_cputlb_empty_entry = {
1972 .addr_read = -1,
1973 .addr_write = -1,
1974 .addr_code = -1,
1975 .addend = -1,
1976 };
1977
1978 /* NOTE: if flush_global is true, also flush global entries (not
1979 implemented yet) */
1980 void tlb_flush(CPUState *env, int flush_global)
1981 {
1982 int i;
1983
1984 #if defined(DEBUG_TLB)
1985 printf("tlb_flush:\n");
1986 #endif
1987 /* must reset current TB so that interrupts cannot modify the
1988 links while we are modifying them */
1989 env->current_tb = NULL;
1990
1991 for(i = 0; i < CPU_TLB_SIZE; i++) {
1992 int mmu_idx;
1993 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1994 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1995 }
1996 }
1997
1998 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1999
2000 env->tlb_flush_addr = -1;
2001 env->tlb_flush_mask = 0;
2002 tlb_flush_count++;
2003 }
2004
2005 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2006 {
2007 if (addr == (tlb_entry->addr_read &
2008 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2009 addr == (tlb_entry->addr_write &
2010 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2011 addr == (tlb_entry->addr_code &
2012 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2013 *tlb_entry = s_cputlb_empty_entry;
2014 }
2015 }
2016
2017 void tlb_flush_page(CPUState *env, target_ulong addr)
2018 {
2019 int i;
2020 int mmu_idx;
2021
2022 #if defined(DEBUG_TLB)
2023 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2024 #endif
2025 /* Check if we need to flush due to large pages. */
2026 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2027 #if defined(DEBUG_TLB)
2028 printf("tlb_flush_page: forced full flush ("
2029 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2030 env->tlb_flush_addr, env->tlb_flush_mask);
2031 #endif
2032 tlb_flush(env, 1);
2033 return;
2034 }
2035 /* must reset current TB so that interrupts cannot modify the
2036 links while we are modifying them */
2037 env->current_tb = NULL;
2038
2039 addr &= TARGET_PAGE_MASK;
2040 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2042 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2043
2044 tlb_flush_jmp_cache(env, addr);
2045 }
2046
2047 /* update the TLBs so that writes to code in the virtual page 'addr'
2048 can be detected */
2049 static void tlb_protect_code(ram_addr_t ram_addr)
2050 {
2051 cpu_physical_memory_reset_dirty(ram_addr,
2052 ram_addr + TARGET_PAGE_SIZE,
2053 CODE_DIRTY_FLAG);
2054 }
2055
2056 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2057 tested for self-modifying code */
2058 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2059 target_ulong vaddr)
2060 {
2061 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2062 }
2063
2064 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2065 unsigned long start, unsigned long length)
2066 {
2067 unsigned long addr;
2068 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2069 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2070 if ((addr - start) < length) {
2071 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2072 }
2073 }
2074 }
2075
2076 /* Note: start and end must be within the same ram block. */
2077 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2078 int dirty_flags)
2079 {
2080 CPUState *env;
2081 unsigned long length, start1;
2082 int i;
2083
2084 start &= TARGET_PAGE_MASK;
2085 end = TARGET_PAGE_ALIGN(end);
2086
2087 length = end - start;
2088 if (length == 0)
2089 return;
2090 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2091
2092 /* we modify the TLB cache so that the dirty bit will be set again
2093 when accessing the range */
2094 start1 = (unsigned long)qemu_safe_ram_ptr(start);
2095 /* Check that we don't span multiple blocks - this breaks the
2096 address comparisons below. */
2097 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2098 != (end - 1) - start) {
2099 abort();
2100 }
2101
2102 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2103 int mmu_idx;
2104 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2105 for(i = 0; i < CPU_TLB_SIZE; i++)
2106 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2107 start1, length);
2108 }
2109 }
2110 }
2111
2112 int cpu_physical_memory_set_dirty_tracking(int enable)
2113 {
2114 int ret = 0;
2115 in_migration = enable;
2116 ret = cpu_notify_migration_log(!!enable);
2117 return ret;
2118 }
2119
2120 int cpu_physical_memory_get_dirty_tracking(void)
2121 {
2122 return in_migration;
2123 }
2124
2125 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2126 target_phys_addr_t end_addr)
2127 {
2128 int ret;
2129
2130 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2131 return ret;
2132 }
2133
2134 int cpu_physical_log_start(target_phys_addr_t start_addr,
2135 ram_addr_t size)
2136 {
2137 CPUPhysMemoryClient *client;
2138 QLIST_FOREACH(client, &memory_client_list, list) {
2139 if (client->log_start) {
2140 int r = client->log_start(client, start_addr, size);
2141 if (r < 0) {
2142 return r;
2143 }
2144 }
2145 }
2146 return 0;
2147 }
2148
2149 int cpu_physical_log_stop(target_phys_addr_t start_addr,
2150 ram_addr_t size)
2151 {
2152 CPUPhysMemoryClient *client;
2153 QLIST_FOREACH(client, &memory_client_list, list) {
2154 if (client->log_stop) {
2155 int r = client->log_stop(client, start_addr, size);
2156 if (r < 0) {
2157 return r;
2158 }
2159 }
2160 }
2161 return 0;
2162 }
2163
2164 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2165 {
2166 ram_addr_t ram_addr;
2167 void *p;
2168
2169 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2170 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2171 + tlb_entry->addend);
2172 ram_addr = qemu_ram_addr_from_host_nofail(p);
2173 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2174 tlb_entry->addr_write |= TLB_NOTDIRTY;
2175 }
2176 }
2177 }
2178
2179 /* update the TLB according to the current state of the dirty bits */
2180 void cpu_tlb_update_dirty(CPUState *env)
2181 {
2182 int i;
2183 int mmu_idx;
2184 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2185 for(i = 0; i < CPU_TLB_SIZE; i++)
2186 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2187 }
2188 }
2189
2190 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2191 {
2192 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2193 tlb_entry->addr_write = vaddr;
2194 }
2195
2196 /* update the TLB corresponding to virtual page vaddr
2197 so that it is no longer dirty */
2198 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2199 {
2200 int i;
2201 int mmu_idx;
2202
2203 vaddr &= TARGET_PAGE_MASK;
2204 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2205 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2206 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2207 }
2208
2209 /* Our TLB does not support large pages, so remember the area covered by
2210 large pages and trigger a full TLB flush if these are invalidated. */
2211 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2212 target_ulong size)
2213 {
2214 target_ulong mask = ~(size - 1);
2215
2216 if (env->tlb_flush_addr == (target_ulong)-1) {
2217 env->tlb_flush_addr = vaddr & mask;
2218 env->tlb_flush_mask = mask;
2219 return;
2220 }
2221 /* Extend the existing region to include the new page.
2222 This is a compromise between unnecessary flushes and the cost
2223 of maintaining a full variable size TLB. */
2224 mask &= env->tlb_flush_mask;
2225 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2226 mask <<= 1;
2227 }
2228 env->tlb_flush_addr &= mask;
2229 env->tlb_flush_mask = mask;
2230 }
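/* A worked example of the widening loop above (an illustrative sketch with
   hypothetical addresses, not part of the original file): two 2MB large
   pages at 0x40000000 and 0x40400000 end up tracked as a single 8MB region,
   because the mask is shifted left until both addresses agree in the bits
   that remain under it. */
#if 0
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t flush_addr = 0x40000000;          /* first 2MB page */
    uint32_t mask = ~(UINT32_C(0x200000) - 1); /* ~(size - 1) */
    uint32_t vaddr = 0x40400000;               /* second 2MB page */

    while (((flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    flush_addr &= mask;
    /* prints flush_addr=0x40000000 mask=0xff800000, i.e. one 8MB region */
    printf("flush_addr=0x%08" PRIx32 " mask=0x%08" PRIx32 "\n",
           flush_addr, mask);
    return 0;
}
#endif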
2231
2232 /* Add a new TLB entry. At most one entry for a given virtual address
2233 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2234 supplied size is only used by tlb_flush_page. */
2235 void tlb_set_page(CPUState *env, target_ulong vaddr,
2236 target_phys_addr_t paddr, int prot,
2237 int mmu_idx, target_ulong size)
2238 {
2239 PhysPageDesc *p;
2240 unsigned long pd;
2241 unsigned int index;
2242 target_ulong address;
2243 target_ulong code_address;
2244 unsigned long addend;
2245 CPUTLBEntry *te;
2246 CPUWatchpoint *wp;
2247 target_phys_addr_t iotlb;
2248
2249 assert(size >= TARGET_PAGE_SIZE);
2250 if (size != TARGET_PAGE_SIZE) {
2251 tlb_add_large_page(env, vaddr, size);
2252 }
2253 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2254 if (!p) {
2255 pd = IO_MEM_UNASSIGNED;
2256 } else {
2257 pd = p->phys_offset;
2258 }
2259 #if defined(DEBUG_TLB)
2260 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2261 " prot=%x idx=%d pd=0x%08lx\n",
2262 vaddr, paddr, prot, mmu_idx, pd);
2263 #endif
2264
2265 address = vaddr;
2266 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2267 /* IO memory case (romd handled later) */
2268 address |= TLB_MMIO;
2269 }
2270 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2271 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2272 /* Normal RAM. */
2273 iotlb = pd & TARGET_PAGE_MASK;
2274 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2275 iotlb |= IO_MEM_NOTDIRTY;
2276 else
2277 iotlb |= IO_MEM_ROM;
2278 } else {
2279 /* IO handlers are currently passed a physical address.
2280 It would be nice to pass an offset from the base address
2281 of that region. This would avoid having to special case RAM,
2282 and avoid full address decoding in every device.
2283 We can't use the high bits of pd for this because
2284 IO_MEM_ROMD uses these as a ram address. */
2285 iotlb = (pd & ~TARGET_PAGE_MASK);
2286 if (p) {
2287 iotlb += p->region_offset;
2288 } else {
2289 iotlb += paddr;
2290 }
2291 }
2292
2293 code_address = address;
2294 /* Make accesses to pages with watchpoints go via the
2295 watchpoint trap routines. */
2296 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2297 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2298 /* Avoid trapping reads of pages with a write breakpoint. */
2299 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2300 iotlb = io_mem_watch + paddr;
2301 address |= TLB_MMIO;
2302 break;
2303 }
2304 }
2305 }
2306
2307 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2308 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2309 te = &env->tlb_table[mmu_idx][index];
2310 te->addend = addend - vaddr;
2311 if (prot & PAGE_READ) {
2312 te->addr_read = address;
2313 } else {
2314 te->addr_read = -1;
2315 }
2316
2317 if (prot & PAGE_EXEC) {
2318 te->addr_code = code_address;
2319 } else {
2320 te->addr_code = -1;
2321 }
2322 if (prot & PAGE_WRITE) {
2323 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2324 (pd & IO_MEM_ROMD)) {
2325 /* Write access calls the I/O callback. */
2326 te->addr_write = address | TLB_MMIO;
2327 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2328 !cpu_physical_memory_is_dirty(pd)) {
2329 te->addr_write = address | TLB_NOTDIRTY;
2330 } else {
2331 te->addr_write = address;
2332 }
2333 } else {
2334 te->addr_write = -1;
2335 }
2336 }
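/* Sketch of a typical caller: a target's tlb_fill() handler, after walking
   the guest MMU, installs the translation with a call along these lines.
   The vaddr/paddr/prot/mmu_idx values are the handler's own locals and are
   shown here only for illustration. */
#if 0
tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
             prot, mmu_idx, TARGET_PAGE_SIZE);
#endif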
2337
2338 #else
2339
2340 void tlb_flush(CPUState *env, int flush_global)
2341 {
2342 }
2343
2344 void tlb_flush_page(CPUState *env, target_ulong addr)
2345 {
2346 }
2347
2348 /*
2349 * Walks guest process memory "regions" one by one
2350 * and calls callback function 'fn' for each region.
2351 */
2352
2353 struct walk_memory_regions_data
2354 {
2355 walk_memory_regions_fn fn;
2356 void *priv;
2357 unsigned long start;
2358 int prot;
2359 };
2360
2361 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2362 abi_ulong end, int new_prot)
2363 {
2364 if (data->start != -1ul) {
2365 int rc = data->fn(data->priv, data->start, end, data->prot);
2366 if (rc != 0) {
2367 return rc;
2368 }
2369 }
2370
2371 data->start = (new_prot ? end : -1ul);
2372 data->prot = new_prot;
2373
2374 return 0;
2375 }
2376
2377 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2378 abi_ulong base, int level, void **lp)
2379 {
2380 abi_ulong pa;
2381 int i, rc;
2382
2383 if (*lp == NULL) {
2384 return walk_memory_regions_end(data, base, 0);
2385 }
2386
2387 if (level == 0) {
2388 PageDesc *pd = *lp;
2389 for (i = 0; i < L2_SIZE; ++i) {
2390 int prot = pd[i].flags;
2391
2392 pa = base | (i << TARGET_PAGE_BITS);
2393 if (prot != data->prot) {
2394 rc = walk_memory_regions_end(data, pa, prot);
2395 if (rc != 0) {
2396 return rc;
2397 }
2398 }
2399 }
2400 } else {
2401 void **pp = *lp;
2402 for (i = 0; i < L2_SIZE; ++i) {
2403 pa = base | ((abi_ulong)i <<
2404 (TARGET_PAGE_BITS + L2_BITS * level));
2405 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2406 if (rc != 0) {
2407 return rc;
2408 }
2409 }
2410 }
2411
2412 return 0;
2413 }
2414
2415 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2416 {
2417 struct walk_memory_regions_data data;
2418 unsigned long i;
2419
2420 data.fn = fn;
2421 data.priv = priv;
2422 data.start = -1ul;
2423 data.prot = 0;
2424
2425 for (i = 0; i < V_L1_SIZE; i++) {
2426 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2427 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2428 if (rc != 0) {
2429 return rc;
2430 }
2431 }
2432
2433 return walk_memory_regions_end(&data, 0, 0);
2434 }
2435
2436 static int dump_region(void *priv, abi_ulong start,
2437 abi_ulong end, unsigned long prot)
2438 {
2439 FILE *f = (FILE *)priv;
2440
2441 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2442 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2443 start, end, end - start,
2444 ((prot & PAGE_READ) ? 'r' : '-'),
2445 ((prot & PAGE_WRITE) ? 'w' : '-'),
2446 ((prot & PAGE_EXEC) ? 'x' : '-'));
2447
2448 return (0);
2449 }
2450
2451 /* dump memory mappings */
2452 void page_dump(FILE *f)
2453 {
2454 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2455 "start", "end", "size", "prot");
2456 walk_memory_regions(f, dump_region);
2457 }
2458
2459 int page_get_flags(target_ulong address)
2460 {
2461 PageDesc *p;
2462
2463 p = page_find(address >> TARGET_PAGE_BITS);
2464 if (!p)
2465 return 0;
2466 return p->flags;
2467 }
2468
2469 /* Modify the flags of a page and invalidate the code if necessary.
2470 The flag PAGE_WRITE_ORG is positioned automatically depending
2471 on PAGE_WRITE. The mmap_lock should already be held. */
2472 void page_set_flags(target_ulong start, target_ulong end, int flags)
2473 {
2474 target_ulong addr, len;
2475
2476 /* This function should never be called with addresses outside the
2477 guest address space. If this assert fires, it probably indicates
2478 a missing call to h2g_valid. */
2479 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2480 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2481 #endif
2482 assert(start < end);
2483
2484 start = start & TARGET_PAGE_MASK;
2485 end = TARGET_PAGE_ALIGN(end);
2486
2487 if (flags & PAGE_WRITE) {
2488 flags |= PAGE_WRITE_ORG;
2489 }
2490
2491 for (addr = start, len = end - start;
2492 len != 0;
2493 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2494 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2495
2496 /* If the write protection bit is set, then we invalidate
2497 the code inside. */
2498 if (!(p->flags & PAGE_WRITE) &&
2499 (flags & PAGE_WRITE) &&
2500 p->first_tb) {
2501 tb_invalidate_phys_page(addr, 0, NULL);
2502 }
2503 p->flags = flags;
2504 }
2505 }
2506
2507 int page_check_range(target_ulong start, target_ulong len, int flags)
2508 {
2509 PageDesc *p;
2510 target_ulong end;
2511 target_ulong addr;
2512
2513 /* This function should never be called with addresses outside the
2514 guest address space. If this assert fires, it probably indicates
2515 a missing call to h2g_valid. */
2516 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2517 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2518 #endif
2519
2520 if (len == 0) {
2521 return 0;
2522 }
2523 if (start + len - 1 < start) {
2524 /* We've wrapped around. */
2525 return -1;
2526 }
2527
2528 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2529 start = start & TARGET_PAGE_MASK;
2530
2531 for (addr = start, len = end - start;
2532 len != 0;
2533 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2534 p = page_find(addr >> TARGET_PAGE_BITS);
2535 if (!p)
2536 return -1;
2537 if (!(p->flags & PAGE_VALID))
2538 return -1;
2539
2540 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2541 return -1;
2542 if (flags & PAGE_WRITE) {
2543 if (!(p->flags & PAGE_WRITE_ORG))
2544 return -1;
2545 /* unprotect the page if it was put read-only because it
2546 contains translated code */
2547 if (!(p->flags & PAGE_WRITE)) {
2548 if (!page_unprotect(addr, 0, NULL))
2549 return -1;
2550 }
2551 return 0;
2552 }
2553 }
2554 return 0;
2555 }
2556
2557 /* called from signal handler: invalidate the code and unprotect the
2558 page. Return TRUE if the fault was successfully handled. */
2559 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2560 {
2561 unsigned int prot;
2562 PageDesc *p;
2563 target_ulong host_start, host_end, addr;
2564
2565 /* Technically this isn't safe inside a signal handler. However we
2566 know this only ever happens in a synchronous SEGV handler, so in
2567 practice it seems to be ok. */
2568 mmap_lock();
2569
2570 p = page_find(address >> TARGET_PAGE_BITS);
2571 if (!p) {
2572 mmap_unlock();
2573 return 0;
2574 }
2575
2576 /* if the page was really writable, then we change its
2577 protection back to writable */
2578 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2579 host_start = address & qemu_host_page_mask;
2580 host_end = host_start + qemu_host_page_size;
2581
2582 prot = 0;
2583 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2584 p = page_find(addr >> TARGET_PAGE_BITS);
2585 p->flags |= PAGE_WRITE;
2586 prot |= p->flags;
2587
2588 /* and since the content will be modified, we must invalidate
2589 the corresponding translated code. */
2590 tb_invalidate_phys_page(addr, pc, puc);
2591 #ifdef DEBUG_TB_CHECK
2592 tb_invalidate_check(addr);
2593 #endif
2594 }
2595 mprotect((void *)g2h(host_start), qemu_host_page_size,
2596 prot & PAGE_BITS);
2597
2598 mmap_unlock();
2599 return 1;
2600 }
2601 mmap_unlock();
2602 return 0;
2603 }
2604
2605 static inline void tlb_set_dirty(CPUState *env,
2606 unsigned long addr, target_ulong vaddr)
2607 {
2608 }
2609 #endif /* defined(CONFIG_USER_ONLY) */
2610
2611 #if !defined(CONFIG_USER_ONLY)
2612
2613 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2614 typedef struct subpage_t {
2615 target_phys_addr_t base;
2616 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2617 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2618 } subpage_t;
2619
2620 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2621 ram_addr_t memory, ram_addr_t region_offset);
2622 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2623 ram_addr_t orig_memory,
2624 ram_addr_t region_offset);
2625 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2626 need_subpage) \
2627 do { \
2628 if (addr > start_addr) \
2629 start_addr2 = 0; \
2630 else { \
2631 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2632 if (start_addr2 > 0) \
2633 need_subpage = 1; \
2634 } \
2635 \
2636 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2637 end_addr2 = TARGET_PAGE_SIZE - 1; \
2638 else { \
2639 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2640 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2641 need_subpage = 1; \
2642 } \
2643 } while (0)
2644
2645 /* register physical memory.
2646 For RAM, 'size' must be a multiple of the target page size.
2647 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2648 io memory page. The address used when calling the IO function is
2649 the offset from the start of the region, plus region_offset. Both
2650 start_addr and region_offset are rounded down to a page boundary
2651 before calculating this offset. This should not be a problem unless
2652 the low bits of start_addr and region_offset differ. */
2653 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2654 ram_addr_t size,
2655 ram_addr_t phys_offset,
2656 ram_addr_t region_offset,
2657 bool log_dirty)
2658 {
2659 target_phys_addr_t addr, end_addr;
2660 PhysPageDesc *p;
2661 CPUState *env;
2662 ram_addr_t orig_size = size;
2663 subpage_t *subpage;
2664
2665 assert(size);
2666 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2667
2668 if (phys_offset == IO_MEM_UNASSIGNED) {
2669 region_offset = start_addr;
2670 }
2671 region_offset &= TARGET_PAGE_MASK;
2672 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2673 end_addr = start_addr + (target_phys_addr_t)size;
2674
2675 addr = start_addr;
2676 do {
2677 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2678 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2679 ram_addr_t orig_memory = p->phys_offset;
2680 target_phys_addr_t start_addr2, end_addr2;
2681 int need_subpage = 0;
2682
2683 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2684 need_subpage);
2685 if (need_subpage) {
2686 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2687 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2688 &p->phys_offset, orig_memory,
2689 p->region_offset);
2690 } else {
2691 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2692 >> IO_MEM_SHIFT];
2693 }
2694 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2695 region_offset);
2696 p->region_offset = 0;
2697 } else {
2698 p->phys_offset = phys_offset;
2699 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2700 (phys_offset & IO_MEM_ROMD))
2701 phys_offset += TARGET_PAGE_SIZE;
2702 }
2703 } else {
2704 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2705 p->phys_offset = phys_offset;
2706 p->region_offset = region_offset;
2707 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2708 (phys_offset & IO_MEM_ROMD)) {
2709 phys_offset += TARGET_PAGE_SIZE;
2710 } else {
2711 target_phys_addr_t start_addr2, end_addr2;
2712 int need_subpage = 0;
2713
2714 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2715 end_addr2, need_subpage);
2716
2717 if (need_subpage) {
2718 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2719 &p->phys_offset, IO_MEM_UNASSIGNED,
2720 addr & TARGET_PAGE_MASK);
2721 subpage_register(subpage, start_addr2, end_addr2,
2722 phys_offset, region_offset);
2723 p->region_offset = 0;
2724 }
2725 }
2726 }
2727 region_offset += TARGET_PAGE_SIZE;
2728 addr += TARGET_PAGE_SIZE;
2729 } while (addr != end_addr);
2730
2731 /* since each CPU stores ram addresses in its TLB cache, we must
2732 reset the modified entries */
2733 /* XXX: slow ! */
2734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2735 tlb_flush(env, 1);
2736 }
2737 }
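/* Sketch of how board code typically reaches this path: allocate RAM with
   qemu_ram_alloc() and hand the returned offset to the
   cpu_register_physical_memory() wrapper (from cpu-common.h), which ends up
   here. The block name and sizes below are hypothetical. */
#if 0
ram_addr_t ram_offset = qemu_ram_alloc(NULL, "example.ram", 0x800000);
cpu_register_physical_memory(0x00000000, 0x800000, ram_offset | IO_MEM_RAM);
#endif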
2738
2739 /* XXX: temporary until new memory mapping API */
2740 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2741 {
2742 PhysPageDesc *p;
2743
2744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2745 if (!p)
2746 return IO_MEM_UNASSIGNED;
2747 return p->phys_offset;
2748 }
2749
2750 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2751 {
2752 if (kvm_enabled())
2753 kvm_coalesce_mmio_region(addr, size);
2754 }
2755
2756 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2757 {
2758 if (kvm_enabled())
2759 kvm_uncoalesce_mmio_region(addr, size);
2760 }
2761
2762 void qemu_flush_coalesced_mmio_buffer(void)
2763 {
2764 if (kvm_enabled())
2765 kvm_flush_coalesced_mmio_buffer();
2766 }
2767
2768 #if defined(__linux__) && !defined(TARGET_S390X)
2769
2770 #include <sys/vfs.h>
2771
2772 #define HUGETLBFS_MAGIC 0x958458f6
2773
2774 static long gethugepagesize(const char *path)
2775 {
2776 struct statfs fs;
2777 int ret;
2778
2779 do {
2780 ret = statfs(path, &fs);
2781 } while (ret != 0 && errno == EINTR);
2782
2783 if (ret != 0) {
2784 perror(path);
2785 return 0;
2786 }
2787
2788 if (fs.f_type != HUGETLBFS_MAGIC)
2789 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2790
2791 return fs.f_bsize;
2792 }
2793
2794 static void *file_ram_alloc(RAMBlock *block,
2795 ram_addr_t memory,
2796 const char *path)
2797 {
2798 char *filename;
2799 void *area;
2800 int fd;
2801 #ifdef MAP_POPULATE
2802 int flags;
2803 #endif
2804 unsigned long hpagesize;
2805
2806 hpagesize = gethugepagesize(path);
2807 if (!hpagesize) {
2808 return NULL;
2809 }
2810
2811 if (memory < hpagesize) {
2812 return NULL;
2813 }
2814
2815 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2816 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2817 return NULL;
2818 }
2819
2820 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2821 return NULL;
2822 }
2823
2824 fd = mkstemp(filename);
2825 if (fd < 0) {
2826 perror("unable to create backing store for hugepages");
2827 free(filename);
2828 return NULL;
2829 }
2830 unlink(filename);
2831 free(filename);
2832
2833 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2834
2835 /*
2836 * ftruncate is not supported by hugetlbfs in older
2837 * hosts, so don't bother bailing out on errors.
2838 * If anything goes wrong with it under other filesystems,
2839 * mmap will fail.
2840 */
2841 if (ftruncate(fd, memory))
2842 perror("ftruncate");
2843
2844 #ifdef MAP_POPULATE
2845 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2846 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2847 * to sidestep this quirk.
2848 */
2849 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2850 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2851 #else
2852 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2853 #endif
2854 if (area == MAP_FAILED) {
2855 perror("file_ram_alloc: can't mmap RAM pages");
2856 close(fd);
2857 return (NULL);
2858 }
2859 block->fd = fd;
2860 return area;
2861 }
2862 #endif
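/* The hugepage-backed path above is reached via the -mem-path option; a
   typical (hypothetical) invocation would be something like
   "qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages", with -mem-prealloc
   additionally selecting the MAP_POPULATE | MAP_SHARED mapping noted in
   file_ram_alloc(). */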
2863
2864 static ram_addr_t find_ram_offset(ram_addr_t size)
2865 {
2866 RAMBlock *block, *next_block;
2867 ram_addr_t offset = 0, mingap = ULONG_MAX;
2868
2869 if (QLIST_EMPTY(&ram_list.blocks))
2870 return 0;
2871
2872 QLIST_FOREACH(block, &ram_list.blocks, next) {
2873 ram_addr_t end, next = ULONG_MAX;
2874
2875 end = block->offset + block->length;
2876
2877 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2878 if (next_block->offset >= end) {
2879 next = MIN(next, next_block->offset);
2880 }
2881 }
2882 if (next - end >= size && next - end < mingap) {
2883 offset = end;
2884 mingap = next - end;
2885 }
2886 }
2887 return offset;
2888 }
2889
2890 static ram_addr_t last_ram_offset(void)
2891 {
2892 RAMBlock *block;
2893 ram_addr_t last = 0;
2894
2895 QLIST_FOREACH(block, &ram_list.blocks, next)
2896 last = MAX(last, block->offset + block->length);
2897
2898 return last;
2899 }
2900
2901 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2902 ram_addr_t size, void *host)
2903 {
2904 RAMBlock *new_block, *block;
2905
2906 size = TARGET_PAGE_ALIGN(size);
2907 new_block = qemu_mallocz(sizeof(*new_block));
2908
2909 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2910 char *id = dev->parent_bus->info->get_dev_path(dev);
2911 if (id) {
2912 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2913 qemu_free(id);
2914 }
2915 }
2916 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2917
2918 QLIST_FOREACH(block, &ram_list.blocks, next) {
2919 if (!strcmp(block->idstr, new_block->idstr)) {
2920 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2921 new_block->idstr);
2922 abort();
2923 }
2924 }
2925
2926 new_block->offset = find_ram_offset(size);
2927 if (host) {
2928 new_block->host = host;
2929 new_block->flags |= RAM_PREALLOC_MASK;
2930 } else {
2931 if (mem_path) {
2932 #if defined (__linux__) && !defined(TARGET_S390X)
2933 new_block->host = file_ram_alloc(new_block, size, mem_path);
2934 if (!new_block->host) {
2935 new_block->host = qemu_vmalloc(size);
2936 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2937 }
2938 #else
2939 fprintf(stderr, "-mem-path option unsupported\n");
2940 exit(1);
2941 #endif
2942 } else {
2943 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2944 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2945 a system-defined value, which is at least 256GB. Larger systems
2946 have larger values. We put the guest between the end of data
2947 segment (system break) and this value. We use 32GB as a base to
2948 have enough room for the system break to grow. */
2949 new_block->host = mmap((void*)0x800000000, size,
2950 PROT_EXEC|PROT_READ|PROT_WRITE,
2951 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2952 if (new_block->host == MAP_FAILED) {
2953 fprintf(stderr, "Allocating RAM failed\n");
2954 abort();
2955 }
2956 #else
2957 if (xen_mapcache_enabled()) {
2958 xen_ram_alloc(new_block->offset, size);
2959 } else {
2960 new_block->host = qemu_vmalloc(size);
2961 }
2962 #endif
2963 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2964 }
2965 }
2966 new_block->length = size;
2967
2968 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2969
2970 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2971 last_ram_offset() >> TARGET_PAGE_BITS);
2972 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2973 0xff, size >> TARGET_PAGE_BITS);
2974
2975 if (kvm_enabled())
2976 kvm_setup_guest_memory(new_block->host, size);
2977
2978 return new_block->offset;
2979 }
2980
2981 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2982 {
2983 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2984 }
2985
2986 void qemu_ram_free_from_ptr(ram_addr_t addr)
2987 {
2988 RAMBlock *block;
2989
2990 QLIST_FOREACH(block, &ram_list.blocks, next) {
2991 if (addr == block->offset) {
2992 QLIST_REMOVE(block, next);
2993 qemu_free(block);
2994 return;
2995 }
2996 }
2997 }
2998
2999 void qemu_ram_free(ram_addr_t addr)
3000 {
3001 RAMBlock *block;
3002
3003 QLIST_FOREACH(block, &ram_list.blocks, next) {
3004 if (addr == block->offset) {
3005 QLIST_REMOVE(block, next);
3006 if (block->flags & RAM_PREALLOC_MASK) {
3007 ;
3008 } else if (mem_path) {
3009 #if defined (__linux__) && !defined(TARGET_S390X)
3010 if (block->fd) {
3011 munmap(block->host, block->length);
3012 close(block->fd);
3013 } else {
3014 qemu_vfree(block->host);
3015 }
3016 #else
3017 abort();
3018 #endif
3019 } else {
3020 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3021 munmap(block->host, block->length);
3022 #else
3023 if (xen_mapcache_enabled()) {
3024 qemu_invalidate_entry(block->host);
3025 } else {
3026 qemu_vfree(block->host);
3027 }
3028 #endif
3029 }
3030 qemu_free(block);
3031 return;
3032 }
3033 }
3034
3035 }
3036
3037 #ifndef _WIN32
3038 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3039 {
3040 RAMBlock *block;
3041 ram_addr_t offset;
3042 int flags;
3043 void *area, *vaddr;
3044
3045 QLIST_FOREACH(block, &ram_list.blocks, next) {
3046 offset = addr - block->offset;
3047 if (offset < block->length) {
3048 vaddr = block->host + offset;
3049 if (block->flags & RAM_PREALLOC_MASK) {
3050 ;
3051 } else {
3052 flags = MAP_FIXED;
3053 munmap(vaddr, length);
3054 if (mem_path) {
3055 #if defined(__linux__) && !defined(TARGET_S390X)
3056 if (block->fd) {
3057 #ifdef MAP_POPULATE
3058 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3059 MAP_PRIVATE;
3060 #else
3061 flags |= MAP_PRIVATE;
3062 #endif
3063 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3064 flags, block->fd, offset);
3065 } else {
3066 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3067 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3068 flags, -1, 0);
3069 }
3070 #else
3071 abort();
3072 #endif
3073 } else {
3074 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3075 flags |= MAP_SHARED | MAP_ANONYMOUS;
3076 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3077 flags, -1, 0);
3078 #else
3079 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3080 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3081 flags, -1, 0);
3082 #endif
3083 }
3084 if (area != vaddr) {
3085 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3086 length, addr);
3087 exit(1);
3088 }
3089 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3090 }
3091 return;
3092 }
3093 }
3094 }
3095 #endif /* !_WIN32 */
3096
3097 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3098 With the exception of the softmmu code in this file, this should
3099 only be used for local memory (e.g. video ram) that the device owns,
3100 and knows it isn't going to access beyond the end of the block.
3101
3102 It should not be used for general purpose DMA.
3103 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3104 */
3105 void *qemu_get_ram_ptr(ram_addr_t addr)
3106 {
3107 RAMBlock *block;
3108
3109 QLIST_FOREACH(block, &ram_list.blocks, next) {
3110 if (addr - block->offset < block->length) {
3111 /* Move this entry to the start of the list. */
3112 if (block != QLIST_FIRST(&ram_list.blocks)) {
3113 QLIST_REMOVE(block, next);
3114 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3115 }
3116 if (xen_mapcache_enabled()) {
3117 /* We need to check if the requested address is in the RAM
3118 * because we don't want to map the entire memory in QEMU.
3119 * In that case just map until the end of the page.
3120 */
3121 if (block->offset == 0) {
3122 return qemu_map_cache(addr, 0, 0);
3123 } else if (block->host == NULL) {
3124 block->host = qemu_map_cache(block->offset, block->length, 1);
3125 }
3126 }
3127 return block->host + (addr - block->offset);
3128 }
3129 }
3130
3131 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3132 abort();
3133
3134 return NULL;
3135 }
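/* Sketch contrasting the two access styles described above (all names and
   addresses hypothetical): a device may use qemu_get_ram_ptr() only for RAM
   it allocated and owns, while DMA into arbitrary guest memory should go
   through cpu_physical_memory_rw(). */
#if 0
/* device-local video RAM, allocated by the device with qemu_ram_alloc() */
uint8_t *vram = qemu_get_ram_ptr(vram_offset);
vram[0] = 0xff;

/* general purpose DMA to a guest physical address */
uint8_t buf[4] = { 1, 2, 3, 4 };
cpu_physical_memory_rw(guest_paddr, buf, sizeof(buf), 1 /* is_write */);
#endif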
3136
3137 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3138 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3139 */
3140 void *qemu_safe_ram_ptr(ram_addr_t addr)
3141 {
3142 RAMBlock *block;
3143
3144 QLIST_FOREACH(block, &ram_list.blocks, next) {
3145 if (addr - block->offset < block->length) {
3146 if (xen_mapcache_enabled()) {
3147 /* We need to check if the requested address is in the RAM
3148 * because we don't want to map the entire memory in QEMU.
3149 * In that case just map until the end of the page.
3150 */
3151 if (block->offset == 0) {
3152 return qemu_map_cache(addr, 0, 0);
3153 } else if (block->host == NULL) {
3154 block->host = qemu_map_cache(block->offset, block->length, 1);
3155 }
3156 }
3157 return block->host + (addr - block->offset);
3158 }
3159 }
3160
3161 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3162 abort();
3163
3164 return NULL;
3165 }
3166
3167 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3168 * but takes a size argument */
3169 void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
3170 {
3171 if (xen_mapcache_enabled())
3172 return qemu_map_cache(addr, *size, 1);
3173 else {
3174 RAMBlock *block;
3175
3176 QLIST_FOREACH(block, &ram_list.blocks, next) {
3177 if (addr - block->offset < block->length) {
3178 if (addr - block->offset + *size > block->length)
3179 *size = block->length - addr + block->offset;
3180 return block->host + (addr - block->offset);
3181 }
3182 }
3183
3184 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3185 abort();
3186
3187 *size = 0;
3188 return NULL;
3189 }
3190 }
3191
3192 void qemu_put_ram_ptr(void *addr)
3193 {
3194 trace_qemu_put_ram_ptr(addr);
3195 }
3196
3197 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3198 {
3199 RAMBlock *block;
3200 uint8_t *host = ptr;
3201
3202 if (xen_mapcache_enabled()) {
3203 *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3204 return 0;
3205 }
3206
3207 QLIST_FOREACH(block, &ram_list.blocks, next) {
3208 /* This case happens when the block is not mapped. */
3209 if (block->host == NULL) {
3210 continue;
3211 }
3212 if (host - block->host < block->length) {
3213 *ram_addr = block->offset + (host - block->host);
3214 return 0;
3215 }
3216 }
3217
3218 return -1;
3219 }
3220
3221 /* Some of the softmmu routines need to translate from a host pointer
3222 (typically a TLB entry) back to a ram offset. */
3223 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3224 {
3225 ram_addr_t ram_addr;
3226
3227 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3228 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3229 abort();
3230 }
3231 return ram_addr;
3232 }
3233
3234 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3235 {
3236 #ifdef DEBUG_UNASSIGNED
3237 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3238 #endif
3239 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3240 do_unassigned_access(addr, 0, 0, 0, 1);
3241 #endif
3242 return 0;
3243 }
3244
3245 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3246 {
3247 #ifdef DEBUG_UNASSIGNED
3248 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3249 #endif
3250 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3251 do_unassigned_access(addr, 0, 0, 0, 2);
3252 #endif
3253 return 0;
3254 }
3255
3256 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3257 {
3258 #ifdef DEBUG_UNASSIGNED
3259 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3260 #endif
3261 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3262 do_unassigned_access(addr, 0, 0, 0, 4);
3263 #endif
3264 return 0;
3265 }
3266
3267 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3268 {
3269 #ifdef DEBUG_UNASSIGNED
3270 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3271 #endif
3272 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3273 do_unassigned_access(addr, 1, 0, 0, 1);
3274 #endif
3275 }
3276
3277 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3278 {
3279 #ifdef DEBUG_UNASSIGNED
3280 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3281 #endif
3282 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3283 do_unassigned_access(addr, 1, 0, 0, 2);
3284 #endif
3285 }
3286
3287 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3288 {
3289 #ifdef DEBUG_UNASSIGNED
3290 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3291 #endif
3292 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3293 do_unassigned_access(addr, 1, 0, 0, 4);
3294 #endif
3295 }
3296
3297 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3298 unassigned_mem_readb,
3299 unassigned_mem_readw,
3300 unassigned_mem_readl,
3301 };
3302
3303 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3304 unassigned_mem_writeb,
3305 unassigned_mem_writew,
3306 unassigned_mem_writel,
3307 };
3308
3309 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3310 uint32_t val)
3311 {
3312 int dirty_flags;
3313 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3314 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3315 #if !defined(CONFIG_USER_ONLY)
3316 tb_invalidate_phys_page_fast(ram_addr, 1);
3317 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3318 #endif
3319 }
3320 stb_p(qemu_get_ram_ptr(ram_addr), val);
3321 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3322 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3323 /* we remove the notdirty callback only if the code has been
3324 flushed */
3325 if (dirty_flags == 0xff)
3326 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3327 }
3328
3329 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3330 uint32_t val)
3331 {
3332 int dirty_flags;
3333 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3334 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3335 #if !defined(CONFIG_USER_ONLY)
3336 tb_invalidate_phys_page_fast(ram_addr, 2);
3337 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3338 #endif
3339 }
3340 stw_p(qemu_get_ram_ptr(ram_addr), val);
3341 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3342 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3343 /* we remove the notdirty callback only if the code has been
3344 flushed */
3345 if (dirty_flags == 0xff)
3346 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3347 }
3348
3349 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3350 uint32_t val)
3351 {
3352 int dirty_flags;
3353 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3354 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3355 #if !defined(CONFIG_USER_ONLY)
3356 tb_invalidate_phys_page_fast(ram_addr, 4);
3357 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3358 #endif
3359 }
3360 stl_p(qemu_get_ram_ptr(ram_addr), val);
3361 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3362 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3363 /* we remove the notdirty callback only if the code has been
3364 flushed */
3365 if (dirty_flags == 0xff)
3366 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3367 }
3368
3369 static CPUReadMemoryFunc * const error_mem_read[3] = {
3370 NULL, /* never used */
3371 NULL, /* never used */
3372 NULL, /* never used */
3373 };
3374
3375 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3376 notdirty_mem_writeb,
3377 notdirty_mem_writew,
3378 notdirty_mem_writel,
3379 };
3380
3381 /* Generate a debug exception if a watchpoint has been hit. */
3382 static void check_watchpoint(int offset, int len_mask, int flags)
3383 {
3384 CPUState *env = cpu_single_env;
3385 target_ulong pc, cs_base;
3386 TranslationBlock *tb;
3387 target_ulong vaddr;
3388 CPUWatchpoint *wp;
3389 int cpu_flags;
3390
3391 if (env->watchpoint_hit) {
3392 /* We re-entered the check after replacing the TB. Now raise
3393 * the debug interrupt so that it will trigger after the
3394 * current instruction. */
3395 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3396 return;
3397 }
3398 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3399 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3400 if ((vaddr == (wp->vaddr & len_mask) ||
3401 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3402 wp->flags |= BP_WATCHPOINT_HIT;
3403 if (!env->watchpoint_hit) {
3404 env->watchpoint_hit = wp;
3405 tb = tb_find_pc(env->mem_io_pc);
3406 if (!tb) {
3407 cpu_abort(env, "check_watchpoint: could not find TB for "
3408 "pc=%p", (void *)env->mem_io_pc);
3409 }
3410 cpu_restore_state(tb, env, env->mem_io_pc);
3411 tb_phys_invalidate(tb, -1);
3412 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3413 env->exception_index = EXCP_DEBUG;
3414 } else {
3415 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3416 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3417 }
3418 cpu_resume_from_signal(env, NULL);
3419 }
3420 } else {
3421 wp->flags &= ~BP_WATCHPOINT_HIT;
3422 }
3423 }
3424 }
3425
3426 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3427 so these check for a hit then pass through to the normal out-of-line
3428 phys routines. */
3429 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3430 {
3431 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3432 return ldub_phys(addr);
3433 }
3434
3435 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3436 {
3437 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3438 return lduw_phys(addr);
3439 }
3440
3441 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3442 {
3443 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3444 return ldl_phys(addr);
3445 }
3446
3447 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3448 uint32_t val)
3449 {
3450 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3451 stb_phys(addr, val);
3452 }
3453
3454 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3455 uint32_t val)
3456 {
3457 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3458 stw_phys(addr, val);
3459 }
3460
3461 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3462 uint32_t val)
3463 {
3464 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3465 stl_phys(addr, val);
3466 }
3467
3468 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3469 watch_mem_readb,
3470 watch_mem_readw,
3471 watch_mem_readl,
3472 };
3473
3474 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3475 watch_mem_writeb,
3476 watch_mem_writew,
3477 watch_mem_writel,
3478 };
3479
3480 static inline uint32_t subpage_readlen (subpage_t *mmio,
3481 target_phys_addr_t addr,
3482 unsigned int len)
3483 {
3484 unsigned int idx = SUBPAGE_IDX(addr);
3485 #if defined(DEBUG_SUBPAGE)
3486 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3487 mmio, len, addr, idx);
3488 #endif
3489
3490 addr += mmio->region_offset[idx];
3491 idx = mmio->sub_io_index[idx];
3492 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3493 }
3494
3495 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3496 uint32_t value, unsigned int len)
3497 {
3498 unsigned int idx = SUBPAGE_IDX(addr);
3499 #if defined(DEBUG_SUBPAGE)
3500 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3501 __func__, mmio, len, addr, idx, value);
3502 #endif
3503
3504 addr += mmio->region_offset[idx];
3505 idx = mmio->sub_io_index[idx];
3506 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3507 }
3508
3509 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3510 {
3511 return subpage_readlen(opaque, addr, 0);
3512 }
3513
3514 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3515 uint32_t value)
3516 {
3517 subpage_writelen(opaque, addr, value, 0);
3518 }
3519
3520 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3521 {
3522 return subpage_readlen(opaque, addr, 1);
3523 }
3524
3525 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3526 uint32_t value)
3527 {
3528 subpage_writelen(opaque, addr, value, 1);
3529 }
3530
3531 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3532 {
3533 return subpage_readlen(opaque, addr, 2);
3534 }
3535
3536 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3537 uint32_t value)
3538 {
3539 subpage_writelen(opaque, addr, value, 2);
3540 }
3541
3542 static CPUReadMemoryFunc * const subpage_read[] = {
3543 &subpage_readb,
3544 &subpage_readw,
3545 &subpage_readl,
3546 };
3547
3548 static CPUWriteMemoryFunc * const subpage_write[] = {
3549 &subpage_writeb,
3550 &subpage_writew,
3551 &subpage_writel,
3552 };
3553
3554 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3555 ram_addr_t memory, ram_addr_t region_offset)
3556 {
3557 int idx, eidx;
3558
3559 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3560 return -1;
3561 idx = SUBPAGE_IDX(start);
3562 eidx = SUBPAGE_IDX(end);
3563 #if defined(DEBUG_SUBPAGE)
3564 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3565 mmio, start, end, idx, eidx, memory);
3566 #endif
3567 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3568 memory = IO_MEM_UNASSIGNED;
3569 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3570 for (; idx <= eidx; idx++) {
3571 mmio->sub_io_index[idx] = memory;
3572 mmio->region_offset[idx] = region_offset;
3573 }
3574
3575 return 0;
3576 }
3577
3578 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3579 ram_addr_t orig_memory,
3580 ram_addr_t region_offset)
3581 {
3582 subpage_t *mmio;
3583 int subpage_memory;
3584
3585 mmio = qemu_mallocz(sizeof(subpage_t));
3586
3587 mmio->base = base;
3588 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3589 DEVICE_NATIVE_ENDIAN);
3590 #if defined(DEBUG_SUBPAGE)
3591 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3592 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3593 #endif
3594 *phys = subpage_memory | IO_MEM_SUBPAGE;
3595 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3596
3597 return mmio;
3598 }
3599
3600 static int get_free_io_mem_idx(void)
3601 {
3602 int i;
3603
3604 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3605 if (!io_mem_used[i]) {
3606 io_mem_used[i] = 1;
3607 return i;
3608 }
3609 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3610 return -1;
3611 }
3612
3613 /*
3614 * Usually, devices operate in little endian mode. There are devices out
3615 * there that operate in big endian too. Each device gets byte swapped
3616 * mmio if plugged onto a CPU that does the other endianness.
3617 *
3618 * CPU Device swap?
3619 *
3620 * little little no
3621 * little big yes
3622 * big little yes
3623 * big big no
3624 */
3625
3626 typedef struct SwapEndianContainer {
3627 CPUReadMemoryFunc *read[3];
3628 CPUWriteMemoryFunc *write[3];
3629 void *opaque;
3630 } SwapEndianContainer;
3631
3632 static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3633 {
3634 uint32_t val;
3635 SwapEndianContainer *c = opaque;
3636 val = c->read[0](c->opaque, addr);
3637 return val;
3638 }
3639
3640 static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3641 {
3642 uint32_t val;
3643 SwapEndianContainer *c = opaque;
3644 val = bswap16(c->read[1](c->opaque, addr));
3645 return val;
3646 }
3647
3648 static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3649 {
3650 uint32_t val;
3651 SwapEndianContainer *c = opaque;
3652 val = bswap32(c->read[2](c->opaque, addr));
3653 return val;
3654 }
3655
3656 static CPUReadMemoryFunc * const swapendian_readfn[3]={
3657 swapendian_mem_readb,
3658 swapendian_mem_readw,
3659 swapendian_mem_readl
3660 };
3661
3662 static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3663 uint32_t val)
3664 {
3665 SwapEndianContainer *c = opaque;
3666 c->write[0](c->opaque, addr, val);
3667 }
3668
3669 static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3670 uint32_t val)
3671 {
3672 SwapEndianContainer *c = opaque;
3673 c->write[1](c->opaque, addr, bswap16(val));
3674 }
3675
3676 static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3677 uint32_t val)
3678 {
3679 SwapEndianContainer *c = opaque;
3680 c->write[2](c->opaque, addr, bswap32(val));
3681 }
3682
3683 static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3684 swapendian_mem_writeb,
3685 swapendian_mem_writew,
3686 swapendian_mem_writel
3687 };
3688
3689 static void swapendian_init(int io_index)
3690 {
3691 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3692 int i;
3693
3694 /* Swap mmio for big endian targets */
3695 c->opaque = io_mem_opaque[io_index];
3696 for (i = 0; i < 3; i++) {
3697 c->read[i] = io_mem_read[io_index][i];
3698 c->write[i] = io_mem_write[io_index][i];
3699
3700 io_mem_read[io_index][i] = swapendian_readfn[i];
3701 io_mem_write[io_index][i] = swapendian_writefn[i];
3702 }
3703 io_mem_opaque[io_index] = c;
3704 }
3705
3706 static void swapendian_del(int io_index)
3707 {
3708 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3709 qemu_free(io_mem_opaque[io_index]);
3710 }
3711 }
3712
3713 /* mem_read and mem_write are arrays of functions containing the
3714 function to access byte (index 0), word (index 1) and dword (index
3715 2). Functions can be omitted with a NULL function pointer.
3716 If io_index is non-zero, the corresponding io zone is
3717 modified. If it is zero, a new io zone is allocated. The return
3718 value can be used with cpu_register_physical_memory(). (-1) is
3719 returned on error. */
3720 static int cpu_register_io_memory_fixed(int io_index,
3721 CPUReadMemoryFunc * const *mem_read,
3722 CPUWriteMemoryFunc * const *mem_write,
3723 void *opaque, enum device_endian endian)
3724 {
3725 int i;
3726
3727 if (io_index <= 0) {
3728 io_index = get_free_io_mem_idx();
3729 if (io_index == -1)
3730 return io_index;
3731 } else {
3732 io_index >>= IO_MEM_SHIFT;
3733 if (io_index >= IO_MEM_NB_ENTRIES)
3734 return -1;
3735 }
3736
3737 for (i = 0; i < 3; ++i) {
3738 io_mem_read[io_index][i]
3739 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3740 }
3741 for (i = 0; i < 3; ++i) {
3742 io_mem_write[io_index][i]
3743 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3744 }
3745 io_mem_opaque[io_index] = opaque;
3746
3747 switch (endian) {
3748 case DEVICE_BIG_ENDIAN:
3749 #ifndef TARGET_WORDS_BIGENDIAN
3750 swapendian_init(io_index);
3751 #endif
3752 break;
3753 case DEVICE_LITTLE_ENDIAN:
3754 #ifdef TARGET_WORDS_BIGENDIAN
3755 swapendian_init(io_index);
3756 #endif
3757 break;
3758 case DEVICE_NATIVE_ENDIAN:
3759 default:
3760 break;
3761 }
3762
3763 return (io_index << IO_MEM_SHIFT);
3764 }
3765
3766 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3767 CPUWriteMemoryFunc * const *mem_write,
3768 void *opaque, enum device_endian endian)
3769 {
3770 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
3771 }
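/* Sketch of a device registering a byte/word/long MMIO region and mapping
   it into the physical address space. The handler names, opaque state and
   addresses are hypothetical; the fragment belongs in a device's init
   function. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

int io = cpu_register_io_memory(mydev_read, mydev_write, mydev_state,
                                DEVICE_LITTLE_ENDIAN);
cpu_register_physical_memory(0xfe000000, 0x1000, io);
#endif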
3772
3773 void cpu_unregister_io_memory(int io_table_address)
3774 {
3775 int i;
3776 int io_index = io_table_address >> IO_MEM_SHIFT;
3777
3778 swapendian_del(io_index);
3779
3780 for (i = 0; i < 3; i++) {
3781 io_mem_read[io_index][i] = unassigned_mem_read[i];
3782 io_mem_write[io_index][i] = unassigned_mem_write[i];
3783 }
3784 io_mem_opaque[io_index] = NULL;
3785 io_mem_used[io_index] = 0;
3786 }
3787
3788 static void io_mem_init(void)
3789 {
3790 int i;
3791
3792 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3793 unassigned_mem_write, NULL,
3794 DEVICE_NATIVE_ENDIAN);
3795 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3796 unassigned_mem_write, NULL,
3797 DEVICE_NATIVE_ENDIAN);
3798 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3799 notdirty_mem_write, NULL,
3800 DEVICE_NATIVE_ENDIAN);
3801 for (i = 0; i < 5; i++)
3802 io_mem_used[i] = 1;
3803
3804 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3805 watch_mem_write, NULL,
3806 DEVICE_NATIVE_ENDIAN);
3807 }
3808
3809 #endif /* !defined(CONFIG_USER_ONLY) */
3810
3811 /* physical memory access (slow version, mainly for debug) */
3812 #if defined(CONFIG_USER_ONLY)
3813 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3814 uint8_t *buf, int len, int is_write)
3815 {
3816 int l, flags;
3817 target_ulong page;
3818 void * p;
3819
3820 while (len > 0) {
3821 page = addr & TARGET_PAGE_MASK;
3822 l = (page + TARGET_PAGE_SIZE) - addr;
3823 if (l > len)
3824 l = len;
3825 flags = page_get_flags(page);
3826 if (!(flags & PAGE_VALID))
3827 return -1;
3828 if (is_write) {
3829 if (!(flags & PAGE_WRITE))
3830 return -1;
3831 /* XXX: this code should not depend on lock_user */
3832 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3833 return -1;
3834 memcpy(p, buf, l);
3835 unlock_user(p, addr, l);
3836 } else {
3837 if (!(flags & PAGE_READ))
3838 return -1;
3839 /* XXX: this code should not depend on lock_user */
3840 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3841 return -1;
3842 memcpy(buf, p, l);
3843 unlock_user(p, addr, 0);
3844 }
3845 len -= l;
3846 buf += l;
3847 addr += l;
3848 }
3849 return 0;
3850 }
3851
3852 #else
3853 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3854 int len, int is_write)
3855 {
3856 int l, io_index;
3857 uint8_t *ptr;
3858 uint32_t val;
3859 target_phys_addr_t page;
3860 unsigned long pd;
3861 PhysPageDesc *p;
3862
3863 while (len > 0) {
3864 page = addr & TARGET_PAGE_MASK;
3865 l = (page + TARGET_PAGE_SIZE) - addr;
3866 if (l > len)
3867 l = len;
3868 p = phys_page_find(page >> TARGET_PAGE_BITS);
3869 if (!p) {
3870 pd = IO_MEM_UNASSIGNED;
3871 } else {
3872 pd = p->phys_offset;
3873 }
3874
3875 if (is_write) {
3876 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3877 target_phys_addr_t addr1 = addr;
3878 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3879 if (p)
3880 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3881 /* XXX: could force cpu_single_env to NULL to avoid
3882 potential bugs */
3883 if (l >= 4 && ((addr1 & 3) == 0)) {
3884 /* 32 bit write access */
3885 val = ldl_p(buf);
3886 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3887 l = 4;
3888 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3889 /* 16 bit write access */
3890 val = lduw_p(buf);
3891 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3892 l = 2;
3893 } else {
3894 /* 8 bit write access */
3895 val = ldub_p(buf);
3896 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3897 l = 1;
3898 }
3899 } else {
3900 unsigned long addr1;
3901 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3902 /* RAM case */
3903 ptr = qemu_get_ram_ptr(addr1);
3904 memcpy(ptr, buf, l);
3905 if (!cpu_physical_memory_is_dirty(addr1)) {
3906 /* invalidate code */
3907 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3908 /* set dirty bit */
3909 cpu_physical_memory_set_dirty_flags(
3910 addr1, (0xff & ~CODE_DIRTY_FLAG));
3911 }
3912 qemu_put_ram_ptr(ptr);
3913 }
3914 } else {
3915 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3916 !(pd & IO_MEM_ROMD)) {
3917 target_phys_addr_t addr1 = addr;
3918 /* I/O case */
3919 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3920 if (p)
3921 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3922 if (l >= 4 && ((addr1 & 3) == 0)) {
3923 /* 32 bit read access */
3924 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3925 stl_p(buf, val);
3926 l = 4;
3927 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3928 /* 16 bit read access */
3929 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3930 stw_p(buf, val);
3931 l = 2;
3932 } else {
3933 /* 8 bit read access */
3934 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3935 stb_p(buf, val);
3936 l = 1;
3937 }
3938 } else {
3939 /* RAM case */
3940 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3941 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3942 qemu_put_ram_ptr(ptr);
3943 }
3944 }
3945 len -= l;
3946 buf += l;
3947 addr += l;
3948 }
3949 }
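
/* Usage sketch (hypothetical, never compiled): the cpu_physical_memory_read()
   and cpu_physical_memory_write() wrappers used elsewhere in this file both
   funnel into cpu_physical_memory_rw() above. The guest physical address and
   the helper name below are made-up example values, not real mappings. */
#if 0
static void example_phys_copy(void)
{
    target_phys_addr_t gpa = 0x100000;       /* hypothetical guest address */
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    cpu_physical_memory_write(gpa, out, sizeof(out));  /* RAM or MMIO path */
    cpu_physical_memory_read(gpa, in, sizeof(in));
}
#endif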
3950
3951 /* used for ROM loading: can write to RAM and ROM */
3952 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3953 const uint8_t *buf, int len)
3954 {
3955 int l;
3956 uint8_t *ptr;
3957 target_phys_addr_t page;
3958 unsigned long pd;
3959 PhysPageDesc *p;
3960
3961 while (len > 0) {
3962 page = addr & TARGET_PAGE_MASK;
3963 l = (page + TARGET_PAGE_SIZE) - addr;
3964 if (l > len)
3965 l = len;
3966 p = phys_page_find(page >> TARGET_PAGE_BITS);
3967 if (!p) {
3968 pd = IO_MEM_UNASSIGNED;
3969 } else {
3970 pd = p->phys_offset;
3971 }
3972
3973 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3974 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3975 !(pd & IO_MEM_ROMD)) {
3976 /* do nothing */
3977 } else {
3978 unsigned long addr1;
3979 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3980 /* ROM/RAM case */
3981 ptr = qemu_get_ram_ptr(addr1);
3982 memcpy(ptr, buf, l);
3983 qemu_put_ram_ptr(ptr);
3984 }
3985 len -= l;
3986 buf += l;
3987 addr += l;
3988 }
3989 }
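
/* Usage sketch (hypothetical, never compiled): installing a firmware blob.
   Unlike cpu_physical_memory_rw(), the helper above also patches ROM and
   ROMD pages; the base address and function name are made up. */
#if 0
static void example_install_firmware(const uint8_t *blob, int size)
{
    target_phys_addr_t rom_base = 0xfffc0000;   /* hypothetical ROM base */

    cpu_physical_memory_write_rom(rom_base, blob, size);
}
#endif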
3990
3991 typedef struct {
3992 void *buffer;
3993 target_phys_addr_t addr;
3994 target_phys_addr_t len;
3995 } BounceBuffer;
3996
3997 static BounceBuffer bounce;
3998
3999 typedef struct MapClient {
4000 void *opaque;
4001 void (*callback)(void *opaque);
4002 QLIST_ENTRY(MapClient) link;
4003 } MapClient;
4004
4005 static QLIST_HEAD(map_client_list, MapClient) map_client_list
4006 = QLIST_HEAD_INITIALIZER(map_client_list);
4007
4008 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4009 {
4010 MapClient *client = qemu_malloc(sizeof(*client));
4011
4012 client->opaque = opaque;
4013 client->callback = callback;
4014 QLIST_INSERT_HEAD(&map_client_list, client, link);
4015 return client;
4016 }
4017
4018 void cpu_unregister_map_client(void *_client)
4019 {
4020 MapClient *client = (MapClient *)_client;
4021
4022 QLIST_REMOVE(client, link);
4023 qemu_free(client);
4024 }
4025
4026 static void cpu_notify_map_clients(void)
4027 {
4028 MapClient *client;
4029
4030 while (!QLIST_EMPTY(&map_client_list)) {
4031 client = QLIST_FIRST(&map_client_list);
4032 client->callback(client->opaque);
4033 cpu_unregister_map_client(client);
4034 }
4035 }
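
/* Usage sketch (hypothetical, never compiled): cpu_physical_memory_map()
   cannot map an MMIO page while the single bounce buffer is already in use,
   so a device model can register a callback that fires once the buffer is
   released. cpu_notify_map_clients() unregisters each client after calling
   it, so the callback only has to retry the mapping. Names are made up. */
#if 0
static void example_dma_retry(void *opaque)
{
    /* re-issue the failed cpu_physical_memory_map() for "opaque" here */
}

static void example_defer_dma(void *dma_request)
{
    cpu_register_map_client(dma_request, example_dma_retry);
}
#endif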
4036
4037 /* Map a physical memory region into a host virtual address.
4038 * May map a subset of the requested range, given by and returned in *plen.
4039 * May return NULL if resources needed to perform the mapping are exhausted.
4040 * Use only for reads OR writes - not for read-modify-write operations.
4041 * Use cpu_register_map_client() to know when retrying the map operation is
4042 * likely to succeed.
4043 */
4044 void *cpu_physical_memory_map(target_phys_addr_t addr,
4045 target_phys_addr_t *plen,
4046 int is_write)
4047 {
4048 target_phys_addr_t len = *plen;
4049 target_phys_addr_t todo = 0;
4050 int l;
4051 target_phys_addr_t page;
4052 unsigned long pd;
4053 PhysPageDesc *p;
4054 target_phys_addr_t addr1 = addr;
4055
4056 while (len > 0) {
4057 page = addr & TARGET_PAGE_MASK;
4058 l = (page + TARGET_PAGE_SIZE) - addr;
4059 if (l > len)
4060 l = len;
4061 p = phys_page_find(page >> TARGET_PAGE_BITS);
4062 if (!p) {
4063 pd = IO_MEM_UNASSIGNED;
4064 } else {
4065 pd = p->phys_offset;
4066 }
4067
4068 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4069 if (todo || bounce.buffer) {
4070 break;
4071 }
4072 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4073 bounce.addr = addr;
4074 bounce.len = l;
4075 if (!is_write) {
4076 cpu_physical_memory_read(addr, bounce.buffer, l);
4077 }
4078
4079 *plen = l;
4080 return bounce.buffer;
4081 }
4082
4083 len -= l;
4084 addr += l;
4085 todo += l;
4086 }
4087 *plen = todo;
4088 return qemu_ram_ptr_length(addr1, plen);
4089 }
4090
4091 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4092 * Will also mark the memory as dirty if is_write == 1. access_len gives
4093 * the amount of memory that was actually read or written by the caller.
4094 */
4095 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4096 int is_write, target_phys_addr_t access_len)
4097 {
4098 if (buffer != bounce.buffer) {
4099 if (is_write) {
4100 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
4101 while (access_len) {
4102 unsigned l;
4103 l = TARGET_PAGE_SIZE;
4104 if (l > access_len)
4105 l = access_len;
4106 if (!cpu_physical_memory_is_dirty(addr1)) {
4107 /* invalidate code */
4108 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4109 /* set dirty bit */
4110 cpu_physical_memory_set_dirty_flags(
4111 addr1, (0xff & ~CODE_DIRTY_FLAG));
4112 }
4113 addr1 += l;
4114 access_len -= l;
4115 }
4116 }
4117 if (xen_mapcache_enabled()) {
4118 qemu_invalidate_entry(buffer);
4119 }
4120 return;
4121 }
4122 if (is_write) {
4123 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4124 }
4125 qemu_vfree(bounce.buffer);
4126 bounce.buffer = NULL;
4127 cpu_notify_map_clients();
4128 }
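
/* Usage sketch (hypothetical, never compiled): the usual zero-copy DMA
   pattern around the two functions above. Only the first *plen bytes are
   guaranteed to be mapped, and the buffer must be handed back through
   cpu_physical_memory_unmap() so dirty tracking and the bounce buffer stay
   consistent. The helper name is made up. */
#if 0
static void example_dma_to_guest(target_phys_addr_t gpa,
                                 const uint8_t *data,
                                 target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host || plen == 0) {
        return;     /* resources exhausted: retry later via a map client */
    }
    memcpy(host, data, plen);   /* only plen bytes are actually mapped */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif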
4129
4130 /* warning: addr must be aligned */
4131 uint32_t ldl_phys(target_phys_addr_t addr)
4132 {
4133 int io_index;
4134 uint8_t *ptr;
4135 uint32_t val;
4136 unsigned long pd;
4137 PhysPageDesc *p;
4138
4139 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4140 if (!p) {
4141 pd = IO_MEM_UNASSIGNED;
4142 } else {
4143 pd = p->phys_offset;
4144 }
4145
4146 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4147 !(pd & IO_MEM_ROMD)) {
4148 /* I/O case */
4149 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4150 if (p)
4151 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4152 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4153 } else {
4154 /* RAM case */
4155 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4156 (addr & ~TARGET_PAGE_MASK);
4157 val = ldl_p(ptr);
4158 }
4159 return val;
4160 }
4161
4162 /* warning: addr must be aligned */
4163 uint64_t ldq_phys(target_phys_addr_t addr)
4164 {
4165 int io_index;
4166 uint8_t *ptr;
4167 uint64_t val;
4168 unsigned long pd;
4169 PhysPageDesc *p;
4170
4171 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4172 if (!p) {
4173 pd = IO_MEM_UNASSIGNED;
4174 } else {
4175 pd = p->phys_offset;
4176 }
4177
4178 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4179 !(pd & IO_MEM_ROMD)) {
4180 /* I/O case */
4181 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4182 if (p)
4183 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4184 #ifdef TARGET_WORDS_BIGENDIAN
4185 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4186 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4187 #else
4188 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4189 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4190 #endif
4191 } else {
4192 /* RAM case */
4193 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4194 (addr & ~TARGET_PAGE_MASK);
4195 val = ldq_p(ptr);
4196 }
4197 return val;
4198 }
4199
4200 /* XXX: optimize */
4201 uint32_t ldub_phys(target_phys_addr_t addr)
4202 {
4203 uint8_t val;
4204 cpu_physical_memory_read(addr, &val, 1);
4205 return val;
4206 }
4207
4208 /* warning: addr must be aligned */
4209 uint32_t lduw_phys(target_phys_addr_t addr)
4210 {
4211 int io_index;
4212 uint8_t *ptr;
4213 uint64_t val;
4214 unsigned long pd;
4215 PhysPageDesc *p;
4216
4217 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4218 if (!p) {
4219 pd = IO_MEM_UNASSIGNED;
4220 } else {
4221 pd = p->phys_offset;
4222 }
4223
4224 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4225 !(pd & IO_MEM_ROMD)) {
4226 /* I/O case */
4227 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4228 if (p)
4229 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4230 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4231 } else {
4232 /* RAM case */
4233 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4234 (addr & ~TARGET_PAGE_MASK);
4235 val = lduw_p(ptr);
4236 }
4237 return val;
4238 }
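
/* Usage sketch (hypothetical, never compiled): reading a made-up guest
   descriptor through the fixed-width load helpers above. The layout (a
   32-bit flags word at offset 0, a 64-bit buffer address at offset 8) and
   the assumption that "desc" is 8-byte aligned are examples only, not
   anything defined by this file. */
#if 0
static uint64_t example_read_descriptor(target_phys_addr_t desc)
{
    uint32_t flags = ldl_phys(desc);        /* 32-bit field, aligned */
    uint64_t buf   = ldq_phys(desc + 8);    /* 64-bit field, aligned */

    return (flags & 1) ? buf : 0;           /* e.g. bit 0 = "valid" */
}
#endif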
4239
4240 /* warning: addr must be aligned. The RAM page is not marked as dirty
4241    and the code inside it is not invalidated. This is useful when the
4242    dirty bits are used to track modified PTEs */
4243 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4244 {
4245 int io_index;
4246 uint8_t *ptr;
4247 unsigned long pd;
4248 PhysPageDesc *p;
4249
4250 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4251 if (!p) {
4252 pd = IO_MEM_UNASSIGNED;
4253 } else {
4254 pd = p->phys_offset;
4255 }
4256
4257 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4258 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4259 if (p)
4260 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4261 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4262 } else {
4263 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4264 ptr = qemu_get_ram_ptr(addr1);
4265 stl_p(ptr, val);
4266
4267 if (unlikely(in_migration)) {
4268 if (!cpu_physical_memory_is_dirty(addr1)) {
4269 /* invalidate code */
4270 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4271 /* set dirty bit */
4272 cpu_physical_memory_set_dirty_flags(
4273 addr1, (0xff & ~CODE_DIRTY_FLAG));
4274 }
4275 }
4276 }
4277 }
4278
4279 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4280 {
4281 int io_index;
4282 uint8_t *ptr;
4283 unsigned long pd;
4284 PhysPageDesc *p;
4285
4286 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4287 if (!p) {
4288 pd = IO_MEM_UNASSIGNED;
4289 } else {
4290 pd = p->phys_offset;
4291 }
4292
4293 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4294 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4295 if (p)
4296 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4297 #ifdef TARGET_WORDS_BIGENDIAN
4298 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4299 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4300 #else
4301 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4302 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4303 #endif
4304 } else {
4305 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4306 (addr & ~TARGET_PAGE_MASK);
4307 stq_p(ptr, val);
4308 }
4309 }
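
/* Usage sketch (hypothetical, never compiled): a target MMU helper setting
   the "accessed" bit of a made-up 32-bit page table entry. Using the
   notdirty store keeps the page from being flagged dirty merely because the
   emulated MMU touched a PTE, which is the use case described in the
   comment above stl_phys_notdirty(). Names and layout are examples. */
#if 0
static void example_pte_set_accessed(target_phys_addr_t pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}
#endif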
4310
4311 /* warning: addr must be aligned */
4312 void stl_phys(target_phys_addr_t addr, uint32_t val)
4313 {
4314 int io_index;
4315 uint8_t *ptr;
4316 unsigned long pd;
4317 PhysPageDesc *p;
4318
4319 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4320 if (!p) {
4321 pd = IO_MEM_UNASSIGNED;
4322 } else {
4323 pd = p->phys_offset;
4324 }
4325
4326 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4327 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4328 if (p)
4329 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4330 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4331 } else {
4332 unsigned long addr1;
4333 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4334 /* RAM case */
4335 ptr = qemu_get_ram_ptr(addr1);
4336 stl_p(ptr, val);
4337 if (!cpu_physical_memory_is_dirty(addr1)) {
4338 /* invalidate code */
4339 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4340 /* set dirty bit */
4341 cpu_physical_memory_set_dirty_flags(addr1,
4342 (0xff & ~CODE_DIRTY_FLAG));
4343 }
4344 }
4345 }
4346
4347 /* XXX: optimize */
4348 void stb_phys(target_phys_addr_t addr, uint32_t val)
4349 {
4350 uint8_t v = val;
4351 cpu_physical_memory_write(addr, &v, 1);
4352 }
4353
4354 /* warning: addr must be aligned */
4355 void stw_phys(target_phys_addr_t addr, uint32_t val)
4356 {
4357 int io_index;
4358 uint8_t *ptr;
4359 unsigned long pd;
4360 PhysPageDesc *p;
4361
4362 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4363 if (!p) {
4364 pd = IO_MEM_UNASSIGNED;
4365 } else {
4366 pd = p->phys_offset;
4367 }
4368
4369 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4370 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4371 if (p)
4372 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4373 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4374 } else {
4375 unsigned long addr1;
4376 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4377 /* RAM case */
4378 ptr = qemu_get_ram_ptr(addr1);
4379 stw_p(ptr, val);
4380 if (!cpu_physical_memory_is_dirty(addr1)) {
4381 /* invalidate code */
4382 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4383 /* set dirty bit */
4384 cpu_physical_memory_set_dirty_flags(addr1,
4385 (0xff & ~CODE_DIRTY_FLAG));
4386 }
4387 }
4388 }
4389
4390 /* XXX: optimize */
4391 void stq_phys(target_phys_addr_t addr, uint64_t val)
4392 {
4393 val = tswap64(val);
4394 cpu_physical_memory_write(addr, &val, 8);
4395 }
4396
4397 /* virtual memory access for debug (includes writing to ROM) */
4398 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4399 uint8_t *buf, int len, int is_write)
4400 {
4401 int l;
4402 target_phys_addr_t phys_addr;
4403 target_ulong page;
4404
4405 while (len > 0) {
4406 page = addr & TARGET_PAGE_MASK;
4407 phys_addr = cpu_get_phys_page_debug(env, page);
4408 /* if no physical page mapped, return an error */
4409 if (phys_addr == -1)
4410 return -1;
4411 l = (page + TARGET_PAGE_SIZE) - addr;
4412 if (l > len)
4413 l = len;
4414 phys_addr += (addr & ~TARGET_PAGE_MASK);
4415 if (is_write)
4416 cpu_physical_memory_write_rom(phys_addr, buf, l);
4417 else
4418 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4419 len -= l;
4420 buf += l;
4421 addr += l;
4422 }
4423 return 0;
4424 }
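
/* Usage sketch (hypothetical, never compiled): the kind of call a debugger
   front end such as the gdb stub makes to peek at guest virtual memory
   through the inspected CPU's page tables. The helper name is made up. */
#if 0
static uint32_t example_peek32(CPUState *env, target_ulong va)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, va, buf, sizeof(buf), 0) < 0) {
        return 0;   /* no physical page is mapped at this address */
    }
    return ldl_p(buf);
}
#endif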
4425 #endif
4426
4427 /* In deterministic execution mode, instructions that perform device I/O
4428    must be at the end of the TB. */
4429 void cpu_io_recompile(CPUState *env, void *retaddr)
4430 {
4431 TranslationBlock *tb;
4432 uint32_t n, cflags;
4433 target_ulong pc, cs_base;
4434 uint64_t flags;
4435
4436 tb = tb_find_pc((unsigned long)retaddr);
4437 if (!tb) {
4438 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4439 retaddr);
4440 }
4441 n = env->icount_decr.u16.low + tb->icount;
4442 cpu_restore_state(tb, env, (unsigned long)retaddr);
4443 /* Calculate how many instructions had been executed before the fault
4444 occurred. */
4445 n = n - env->icount_decr.u16.low;
4446 /* Generate a new TB ending on the I/O insn. */
4447 n++;
4448 /* On MIPS and SH4, a delay-slot instruction can only be restarted if
4449    it was already the first instruction in the TB. If it was not the
4450    first instruction, re-execute the preceding branch instead, so that
4451    the branch and its delay slot are retried together. */
4452 #if defined(TARGET_MIPS)
4453 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4454 env->active_tc.PC -= 4;
4455 env->icount_decr.u16.low++;
4456 env->hflags &= ~MIPS_HFLAG_BMASK;
4457 }
4458 #elif defined(TARGET_SH4)
4459 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4460 && n > 1) {
4461 env->pc -= 2;
4462 env->icount_decr.u16.low++;
4463 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4464 }
4465 #endif
4466 /* This should never happen. */
4467 if (n > CF_COUNT_MASK)
4468 cpu_abort(env, "TB too big during recompile");
4469
4470 cflags = n | CF_LAST_IO;
4471 pc = tb->pc;
4472 cs_base = tb->cs_base;
4473 flags = tb->flags;
4474 tb_phys_invalidate(tb, -1);
4475 /* FIXME: In theory this could raise an exception. In practice
4476 we have already translated the block once so it's probably ok. */
4477 tb_gen_code(env, pc, cs_base, flags, cflags);
4478 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4479 the first in the TB) then we end up generating a whole new TB and
4480 repeating the fault, which is horribly inefficient.
4481 Better would be to execute just this insn uncached, or generate a
4482 second new TB. */
4483 cpu_resume_from_signal(env, NULL);
4484 }
4485
4486 #if !defined(CONFIG_USER_ONLY)
4487
4488 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4489 {
4490 int i, target_code_size, max_target_code_size;
4491 int direct_jmp_count, direct_jmp2_count, cross_page;
4492 TranslationBlock *tb;
4493
4494 target_code_size = 0;
4495 max_target_code_size = 0;
4496 cross_page = 0;
4497 direct_jmp_count = 0;
4498 direct_jmp2_count = 0;
4499 for(i = 0; i < nb_tbs; i++) {
4500 tb = &tbs[i];
4501 target_code_size += tb->size;
4502 if (tb->size > max_target_code_size)
4503 max_target_code_size = tb->size;
4504 if (tb->page_addr[1] != -1)
4505 cross_page++;
4506 if (tb->tb_next_offset[0] != 0xffff) {
4507 direct_jmp_count++;
4508 if (tb->tb_next_offset[1] != 0xffff) {
4509 direct_jmp2_count++;
4510 }
4511 }
4512 }
4513 /* XXX: avoid using doubles? */
4514 cpu_fprintf(f, "Translation buffer state:\n");
4515 cpu_fprintf(f, "gen code size %td/%ld\n",
4516 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4517 cpu_fprintf(f, "TB count %d/%d\n",
4518 nb_tbs, code_gen_max_blocks);
4519 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4520 nb_tbs ? target_code_size / nb_tbs : 0,
4521 max_target_code_size);
4522 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4523 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4524 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4525 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4526 cross_page,
4527 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4528 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4529 direct_jmp_count,
4530 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4531 direct_jmp2_count,
4532 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4533 cpu_fprintf(f, "\nStatistics:\n");
4534 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4535 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4536 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4537 tcg_dump_info(f, cpu_fprintf);
4538 }
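
/* Usage sketch (hypothetical, never compiled): dumping the JIT statistics
   gathered above to stderr, e.g. from an ad-hoc debugging hook. The
   monitor's "info jit" command drives dump_exec_info() the same way with
   its own fprintf_function. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif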
4539
4540 #define MMUSUFFIX _cmmu
4541 #define GETPC() NULL
4542 #define env cpu_single_env
4543 #define SOFTMMU_CODE_ACCESS
4544
4545 #define SHIFT 0
4546 #include "softmmu_template.h"
4547
4548 #define SHIFT 1
4549 #include "softmmu_template.h"
4550
4551 #define SHIFT 2
4552 #include "softmmu_template.h"
4553
4554 #define SHIFT 3
4555 #include "softmmu_template.h"
4556
4557 #undef env
4558
4559 #endif