/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
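
/* Worked example (values depend on the configured target, these are
   illustrative): for a user-mode guest with a 47-bit virtual address space
   and 12-bit pages, V_L1_BITS_REM = (47 - 12) % 10 = 5, which is >= 4, so
   V_L1_BITS = 5, V_L1_SIZE = 32 and V_L1_SHIFT = 35 - 5 = 30.  The map then
   consists of a 32-entry L1 table followed by V_L1_SHIFT / L2_BITS = 3
   ten-bit levels of 1024 entries each, covering all 35 page-number bits.  */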

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
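
/* Illustrative arithmetic (hypothetical values): with a 4096-byte host page,
   map_exec(addr = 0x1234, size = 0x2000) rounds start down to 0x1000 and end
   (0x3234) up to 0x4000, so every page touched by the buffer, i.e. the range
   0x1000..0x4000, is made executable.  */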

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
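
/* Sketch of the walk above (hypothetical numbers): with V_L1_SHIFT = 30 and
   L2_BITS = 10, a page index of 0x12345678 is split into the L1 slot
   (index >> 30), then two intermediate slots selected by bits 29..20 and
   19..10 of the index, and finally (index & 0x3ff) picks the PageDesc in
   the leaf array.  */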

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = IO_MEM_UNASSIGNED,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}
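
/* The default returned for an unmapped index mirrors the initialisation in
   phys_page_find_alloc(): e.g. (illustrative value) index 0x1234 yields
   phys_offset = IO_MEM_UNASSIGNED and region_offset = 0x1234 <<
   TARGET_PAGE_BITS, i.e. the identity mapping of the page's own physical
   address.  */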

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
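
/* A note on the "& 3" above: the per-page TB lists store the index of the
   page within the TB (0 or 1, since a TB can span two pages) in the low two
   bits of each link, which are free because TranslationBlocks are aligned.
   A link value of, say, tb | 1 means "tb, via its second page"; masking with
   ~3 recovers the pointer and the low bits select the matching page_next[]
   slot.  The jump lists below reuse the same trick, with the special tag
   n1 == 2 marking the list head stored in jmp_first.  */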

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
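
/* Worked example (illustrative values): set_bits(tab, 3, 10) marks bits
   3..12.  The head byte gets mask 0xff << 3 = 0xf8 (bits 3..7), no full
   0xff bytes are needed here, and the tail byte gets ~(0xff << 5) = 0x1f
   (bits 8..12); i.e. tab[0] |= 0xf8 and tab[1] |= 0x1f.  */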

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
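
/* Illustrative case for the "next page" check (hypothetical addresses,
   4 KiB pages): a TB starting at pc = 0x1ff8 with size = 0x20 has its last
   byte at 0x2017, so virt_page2 = 0x2000 differs from
   (pc & TARGET_PAGE_MASK) = 0x1000 and the TB is linked into both pages.
   A TB entirely inside one page keeps phys_page2 = -1.  */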

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end).  NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
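
/* The binary search relies on tbs[] being filled in ascending tc_ptr order
   (TBs are carved sequentially out of code_gen_buffer), so when no exact
   match is found, tbs[m_max] is the last TB whose tc_ptr lies below the
   probe and therefore the one containing it.  E.g. (made-up values) with
   tc_ptrs {0x100, 0x180, 0x240}, a probe of 0x1c0 lands on the TB at 0x180. */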

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
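
/* Usage sketch: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, ready to pass to cpu_set_log(); an
   unrecognised name anywhere in the list makes the whole call return 0.  */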
1779
1780 void cpu_abort(CPUState *env, const char *fmt, ...)
1781 {
1782 va_list ap;
1783 va_list ap2;
1784
1785 va_start(ap, fmt);
1786 va_copy(ap2, ap);
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790 #ifdef TARGET_I386
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792 #else
1793 cpu_dump_state(env, stderr, fprintf, 0);
1794 #endif
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1798 qemu_log("\n");
1799 #ifdef TARGET_I386
1800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1801 #else
1802 log_cpu_state(env, 0);
1803 #endif
1804 qemu_log_flush();
1805 qemu_log_close();
1806 }
1807 va_end(ap2);
1808 va_end(ap);
1809 #if defined(CONFIG_USER_ONLY)
1810 {
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1815 }
1816 #endif
1817 abort();
1818 }
1819
1820 CPUState *cpu_copy(CPUState *env)
1821 {
1822 CPUState *new_env = cpu_init(env->cpu_model_str);
1823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
1825 #if defined(TARGET_HAS_ICE)
1826 CPUBreakpoint *bp;
1827 CPUWatchpoint *wp;
1828 #endif
1829
1830 memcpy(new_env, env, sizeof(CPUState));
1831
1832 /* Preserve chaining and index. */
1833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
1835
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
1839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
1841 #if defined(TARGET_HAS_ICE)
1842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1844 }
1845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1847 wp->flags, NULL);
1848 }
1849 #endif
1850
1851 return new_env;
1852 }
1853
1854 #if !defined(CONFIG_USER_ONLY)
1855
1856 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1857 {
1858 unsigned int i;
1859
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
1864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1865
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
1868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1869 }
1870
1871 static CPUTLBEntry s_cputlb_empty_entry = {
1872 .addr_read = -1,
1873 .addr_write = -1,
1874 .addr_code = -1,
1875 .addend = -1,
1876 };
1877
1878 /* NOTE: if flush_global is true, also flush global entries (not
1879 implemented yet) */
1880 void tlb_flush(CPUState *env, int flush_global)
1881 {
1882 int i;
1883
1884 #if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886 #endif
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
1892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1895 }
1896 }
1897
1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1899
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
1902 tlb_flush_count++;
1903 }
1904
1905 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1906 {
1907 if (addr == (tlb_entry->addr_read &
1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1909 addr == (tlb_entry->addr_write &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_code &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1913 *tlb_entry = s_cputlb_empty_entry;
1914 }
1915 }
1916
1917 void tlb_flush_page(CPUState *env, target_ulong addr)
1918 {
1919 int i;
1920 int mmu_idx;
1921
1922 #if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1924 #endif
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927 #if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931 #endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
1938
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1943
1944 tlb_flush_jmp_cache(env, addr);
1945 }
1946
1947 /* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
1949 static void tlb_protect_code(ram_addr_t ram_addr)
1950 {
1951 cpu_physical_memory_reset_dirty(ram_addr,
1952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
1954 }
1955
1956 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1957 tested for self modifying code */
1958 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1959 target_ulong vaddr)
1960 {
1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1962 }
1963
1964 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1965 unsigned long start, unsigned long length)
1966 {
1967 unsigned long addr;
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1970 if ((addr - start) < length) {
1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1972 }
1973 }
1974 }
1975
1976 /* Note: start and end must be within the same ram block. */
1977 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1978 int dirty_flags)
1979 {
1980 CPUState *env;
1981 unsigned long length, start1;
1982 int i;
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1991
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1995 /* Check that we don't span multiple blocks - this breaks the
1996 address comparisons below. */
1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1998 != (end - 1) - start) {
1999 abort();
2000 }
2001
2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
2009 }
2010 }
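/* Flow sketch (descriptive, not normative): live migration or a display
 * device calls this after syncing a range; tlb_reset_dirty_range() above
 * then re-arms TLB_NOTDIRTY on any cached write entries, so the next guest
 * store takes the slow notdirty write path and sets the dirty flags again. */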
2011
2012 int cpu_physical_memory_set_dirty_tracking(int enable)
2013 {
2014 int ret = 0;
2015 in_migration = enable;
2016 return ret;
2017 }
2018
2019 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020 {
2021 ram_addr_t ram_addr;
2022 void *p;
2023
2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
2030 }
2031 }
2032 }
2033
2034 /* update the TLB according to the current state of the dirty bits */
2035 void cpu_tlb_update_dirty(CPUState *env)
2036 {
2037 int i;
2038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
2043 }
2044
2045 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2046 {
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
2049 }
2050
2051 /* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2054 {
2055 int i;
2056 int mmu_idx;
2057
2058 vaddr &= TARGET_PAGE_MASK;
2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2062 }
2063
2064 /* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068 {
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a fully variable-size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085 }
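/* Worked example (illustrative, assuming 2 MB large pages): a first large
 * page at vaddr 0x200000 records tlb_flush_addr = 0x200000 and
 * tlb_flush_mask = ~0x1fffff. A second one at vaddr 0x600000 then widens
 * the mask until both addresses fall inside one region:
 *
 *     0x200000 ^ 0x600000 = 0x400000, which is still set under ~0x1fffff
 *     and ~0x3fffff, so the loop shifts twice and stops at mask = ~0x7fffff,
 *     giving tlb_flush_addr = 0 and a tracked region of 0x0-0x7fffff.
 *
 * Any later tlb_flush_page() hitting that window forces a full flush. */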
2086
2087 /* Add a new TLB entry. At most one entry for a given virtual address
2088 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2089 supplied size is used only by tlb_flush_page. */
2090 void tlb_set_page(CPUState *env, target_ulong vaddr,
2091 target_phys_addr_t paddr, int prot,
2092 int mmu_idx, target_ulong size)
2093 {
2094 PhysPageDesc p;
2095 unsigned long pd;
2096 unsigned int index;
2097 target_ulong address;
2098 target_ulong code_address;
2099 unsigned long addend;
2100 CPUTLBEntry *te;
2101 CPUWatchpoint *wp;
2102 target_phys_addr_t iotlb;
2103
2104 assert(size >= TARGET_PAGE_SIZE);
2105 if (size != TARGET_PAGE_SIZE) {
2106 tlb_add_large_page(env, vaddr, size);
2107 }
2108 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2109 pd = p.phys_offset;
2110 #if defined(DEBUG_TLB)
2111 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2112 " prot=%x idx=%d pd=0x%08lx\n",
2113 vaddr, paddr, prot, mmu_idx, pd);
2114 #endif
2115
2116 address = vaddr;
2117 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2118 /* IO memory case (romd handled later) */
2119 address |= TLB_MMIO;
2120 }
2121 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2122 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2123 /* Normal RAM. */
2124 iotlb = pd & TARGET_PAGE_MASK;
2125 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2126 iotlb |= IO_MEM_NOTDIRTY;
2127 else
2128 iotlb |= IO_MEM_ROM;
2129 } else {
2130 /* IO handlers are currently passed a physical address.
2131 It would be nice to pass an offset from the base address
2132 of that region. This would avoid having to special case RAM,
2133 and avoid full address decoding in every device.
2134 We can't use the high bits of pd for this because
2135 IO_MEM_ROMD uses these as a ram address. */
2136 iotlb = (pd & ~TARGET_PAGE_MASK);
2137 iotlb += p.region_offset;
2138 }
2139
2140 code_address = address;
2141 /* Make accesses to pages with watchpoints go via the
2142 watchpoint trap routines. */
2143 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2144 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2145 /* Avoid trapping reads of pages with a write breakpoint. */
2146 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2147 iotlb = io_mem_watch + paddr;
2148 address |= TLB_MMIO;
2149 break;
2150 }
2151 }
2152 }
2153
2154 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2155 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2156 te = &env->tlb_table[mmu_idx][index];
2157 te->addend = addend - vaddr;
2158 if (prot & PAGE_READ) {
2159 te->addr_read = address;
2160 } else {
2161 te->addr_read = -1;
2162 }
2163
2164 if (prot & PAGE_EXEC) {
2165 te->addr_code = code_address;
2166 } else {
2167 te->addr_code = -1;
2168 }
2169 if (prot & PAGE_WRITE) {
2170 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2171 (pd & IO_MEM_ROMD)) {
2172 /* Write access calls the I/O callback. */
2173 te->addr_write = address | TLB_MMIO;
2174 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2175 !cpu_physical_memory_is_dirty(pd)) {
2176 te->addr_write = address | TLB_NOTDIRTY;
2177 } else {
2178 te->addr_write = address;
2179 }
2180 } else {
2181 te->addr_write = -1;
2182 }
2183 }
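/* Illustrative summary (a sketch, not normative) of the entry produced for
 * a plain RAM page at page-aligned vaddr V backed by host pointer H:
 *
 *     te->addr_read  == V                  (-1 if PAGE_READ is absent)
 *     te->addr_write == V | TLB_NOTDIRTY   (while the page is still clean)
 *     te->addend     == H - V              (guest-to-host: vaddr + addend)
 *
 * MMIO pages instead carry TLB_MMIO in the low bits so that every access
 * is routed through the I/O handlers. */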
2184
2185 #else
2186
2187 void tlb_flush(CPUState *env, int flush_global)
2188 {
2189 }
2190
2191 void tlb_flush_page(CPUState *env, target_ulong addr)
2192 {
2193 }
2194
2195 /*
2196 * Walks guest process memory "regions" one by one
2197 * and calls the callback function 'fn' for each region.
2198 */
2199
2200 struct walk_memory_regions_data
2201 {
2202 walk_memory_regions_fn fn;
2203 void *priv;
2204 unsigned long start;
2205 int prot;
2206 };
2207
2208 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2209 abi_ulong end, int new_prot)
2210 {
2211 if (data->start != -1ul) {
2212 int rc = data->fn(data->priv, data->start, end, data->prot);
2213 if (rc != 0) {
2214 return rc;
2215 }
2216 }
2217
2218 data->start = (new_prot ? end : -1ul);
2219 data->prot = new_prot;
2220
2221 return 0;
2222 }
2223
2224 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2225 abi_ulong base, int level, void **lp)
2226 {
2227 abi_ulong pa;
2228 int i, rc;
2229
2230 if (*lp == NULL) {
2231 return walk_memory_regions_end(data, base, 0);
2232 }
2233
2234 if (level == 0) {
2235 PageDesc *pd = *lp;
2236 for (i = 0; i < L2_SIZE; ++i) {
2237 int prot = pd[i].flags;
2238
2239 pa = base | (i << TARGET_PAGE_BITS);
2240 if (prot != data->prot) {
2241 rc = walk_memory_regions_end(data, pa, prot);
2242 if (rc != 0) {
2243 return rc;
2244 }
2245 }
2246 }
2247 } else {
2248 void **pp = *lp;
2249 for (i = 0; i < L2_SIZE; ++i) {
2250 pa = base | ((abi_ulong)i <<
2251 (TARGET_PAGE_BITS + L2_BITS * level));
2252 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2253 if (rc != 0) {
2254 return rc;
2255 }
2256 }
2257 }
2258
2259 return 0;
2260 }
2261
2262 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2263 {
2264 struct walk_memory_regions_data data;
2265 unsigned long i;
2266
2267 data.fn = fn;
2268 data.priv = priv;
2269 data.start = -1ul;
2270 data.prot = 0;
2271
2272 for (i = 0; i < V_L1_SIZE; i++) {
2273 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2274 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2275 if (rc != 0) {
2276 return rc;
2277 }
2278 }
2279
2280 return walk_memory_regions_end(&data, 0, 0);
2281 }
2282
2283 static int dump_region(void *priv, abi_ulong start,
2284 abi_ulong end, unsigned long prot)
2285 {
2286 FILE *f = (FILE *)priv;
2287
2288 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2289 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2290 start, end, end - start,
2291 ((prot & PAGE_READ) ? 'r' : '-'),
2292 ((prot & PAGE_WRITE) ? 'w' : '-'),
2293 ((prot & PAGE_EXEC) ? 'x' : '-'));
2294
2295 return 0;
2296 }
2297
2298 /* dump memory mappings */
2299 void page_dump(FILE *f)
2300 {
2301 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2302 "start", "end", "size", "prot");
2303 walk_memory_regions(f, dump_region);
2304 }
2305
2306 int page_get_flags(target_ulong address)
2307 {
2308 PageDesc *p;
2309
2310 p = page_find(address >> TARGET_PAGE_BITS);
2311 if (!p)
2312 return 0;
2313 return p->flags;
2314 }
2315
2316 /* Modify the flags of a page and invalidate the code if necessary.
2317 The flag PAGE_WRITE_ORG is positioned automatically depending
2318 on PAGE_WRITE. The mmap_lock should already be held. */
2319 void page_set_flags(target_ulong start, target_ulong end, int flags)
2320 {
2321 target_ulong addr, len;
2322
2323 /* This function should never be called with addresses outside the
2324 guest address space. If this assert fires, it probably indicates
2325 a missing call to h2g_valid. */
2326 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2327 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2328 #endif
2329 assert(start < end);
2330
2331 start = start & TARGET_PAGE_MASK;
2332 end = TARGET_PAGE_ALIGN(end);
2333
2334 if (flags & PAGE_WRITE) {
2335 flags |= PAGE_WRITE_ORG;
2336 }
2337
2338 for (addr = start, len = end - start;
2339 len != 0;
2340 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2341 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2342
2343 /* If the write protection bit is set, then we invalidate
2344 the code inside. */
2345 if (!(p->flags & PAGE_WRITE) &&
2346 (flags & PAGE_WRITE) &&
2347 p->first_tb) {
2348 tb_invalidate_phys_page(addr, 0, NULL);
2349 }
2350 p->flags = flags;
2351 }
2352 }
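/* Usage sketch (hypothetical caller, e.g. a guest binary loader mapping a
 * text segment; mmap_lock is assumed held as required above):
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_EXEC);
 *
 * A later transition to writable would pass PAGE_WRITE too, which makes
 * this function set PAGE_WRITE_ORG automatically. */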
2353
2354 int page_check_range(target_ulong start, target_ulong len, int flags)
2355 {
2356 PageDesc *p;
2357 target_ulong end;
2358 target_ulong addr;
2359
2360 /* This function should never be called with addresses outside the
2361 guest address space. If this assert fires, it probably indicates
2362 a missing call to h2g_valid. */
2363 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2364 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2365 #endif
2366
2367 if (len == 0) {
2368 return 0;
2369 }
2370 if (start + len - 1 < start) {
2371 /* We've wrapped around. */
2372 return -1;
2373 }
2374
2375 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2376 start = start & TARGET_PAGE_MASK;
2377
2378 for (addr = start, len = end - start;
2379 len != 0;
2380 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2381 p = page_find(addr >> TARGET_PAGE_BITS);
2382 if (!p)
2383 return -1;
2384 if (!(p->flags & PAGE_VALID))
2385 return -1;
2386
2387 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2388 return -1;
2389 if (flags & PAGE_WRITE) {
2390 if (!(p->flags & PAGE_WRITE_ORG))
2391 return -1;
2392 /* unprotect the page if it was put read-only because it
2393 contains translated code */
2394 if (!(p->flags & PAGE_WRITE)) {
2395 if (!page_unprotect(addr, 0, NULL))
2396 return -1;
2397 }
2398 return 0;
2399 }
2400 }
2401 return 0;
2402 }
2403
2404 /* called from signal handler: invalidate the code and unprotect the
2405 page. Return TRUE if the fault was successfully handled. */
2406 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2407 {
2408 unsigned int prot;
2409 PageDesc *p;
2410 target_ulong host_start, host_end, addr;
2411
2412 /* Technically this isn't safe inside a signal handler. However we
2413 know this only ever happens in a synchronous SEGV handler, so in
2414 practice it seems to be ok. */
2415 mmap_lock();
2416
2417 p = page_find(address >> TARGET_PAGE_BITS);
2418 if (!p) {
2419 mmap_unlock();
2420 return 0;
2421 }
2422
2423 /* if the page was really writable, then we change its
2424 protection back to writable */
2425 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2426 host_start = address & qemu_host_page_mask;
2427 host_end = host_start + qemu_host_page_size;
2428
2429 prot = 0;
2430 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2431 p = page_find(addr >> TARGET_PAGE_BITS);
2432 p->flags |= PAGE_WRITE;
2433 prot |= p->flags;
2434
2435 /* and since the content will be modified, we must invalidate
2436 the corresponding translated code. */
2437 tb_invalidate_phys_page(addr, pc, puc);
2438 #ifdef DEBUG_TB_CHECK
2439 tb_invalidate_check(addr);
2440 #endif
2441 }
2442 mprotect((void *)g2h(host_start), qemu_host_page_size,
2443 prot & PAGE_BITS);
2444
2445 mmap_unlock();
2446 return 1;
2447 }
2448 mmap_unlock();
2449 return 0;
2450 }
2451
2452 static inline void tlb_set_dirty(CPUState *env,
2453 unsigned long addr, target_ulong vaddr)
2454 {
2455 }
2456 #endif /* defined(CONFIG_USER_ONLY) */
2457
2458 #if !defined(CONFIG_USER_ONLY)
2459
2460 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2461 typedef struct subpage_t {
2462 target_phys_addr_t base;
2463 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2464 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2465 } subpage_t;
2466
2467 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2468 ram_addr_t memory, ram_addr_t region_offset);
2469 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2470 ram_addr_t orig_memory,
2471 ram_addr_t region_offset);
2472 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2473 need_subpage) \
2474 do { \
2475 if (addr > start_addr) \
2476 start_addr2 = 0; \
2477 else { \
2478 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2479 if (start_addr2 > 0) \
2480 need_subpage = 1; \
2481 } \
2482 \
2483 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2484 end_addr2 = TARGET_PAGE_SIZE - 1; \
2485 else { \
2486 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2487 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2488 need_subpage = 1; \
2489 } \
2490 } while (0)
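/* Worked example (assuming a 4 KiB target page): registering a region with
 * start_addr = 0x1000200 and orig_size = 0x100 while addr = 0x1000000 yields
 *
 *     start_addr2 = 0x200   (region starts mid-page -> need_subpage = 1)
 *     end_addr2   = 0x2ff   (region ends mid-page   -> need_subpage = 1)
 *
 * so only bytes 0x200-0x2ff of that page are rerouted to the new handlers,
 * and the rest of the page keeps its previous mapping via a subpage. */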
2491
2492 /* register physical memory.
2493 For RAM, 'size' must be a multiple of the target page size.
2494 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2495 io memory page. The address used when calling the IO function is
2496 the offset from the start of the region, plus region_offset. Both
2497 start_addr and region_offset are rounded down to a page boundary
2498 before calculating this offset. This should not be a problem unless
2499 the low bits of start_addr and region_offset differ. */
2500 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2501 ram_addr_t size,
2502 ram_addr_t phys_offset,
2503 ram_addr_t region_offset,
2504 bool log_dirty)
2505 {
2506 target_phys_addr_t addr, end_addr;
2507 PhysPageDesc *p;
2508 CPUState *env;
2509 ram_addr_t orig_size = size;
2510 subpage_t *subpage;
2511
2512 assert(size);
2513
2514 if (phys_offset == IO_MEM_UNASSIGNED) {
2515 region_offset = start_addr;
2516 }
2517 region_offset &= TARGET_PAGE_MASK;
2518 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2519 end_addr = start_addr + (target_phys_addr_t)size;
2520
2521 addr = start_addr;
2522 do {
2523 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2524 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2525 ram_addr_t orig_memory = p->phys_offset;
2526 target_phys_addr_t start_addr2, end_addr2;
2527 int need_subpage = 0;
2528
2529 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2530 need_subpage);
2531 if (need_subpage) {
2532 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2533 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2534 &p->phys_offset, orig_memory,
2535 p->region_offset);
2536 } else {
2537 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2538 >> IO_MEM_SHIFT];
2539 }
2540 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2541 region_offset);
2542 p->region_offset = 0;
2543 } else {
2544 p->phys_offset = phys_offset;
2545 p->region_offset = region_offset;
2546 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2547 (phys_offset & IO_MEM_ROMD))
2548 phys_offset += TARGET_PAGE_SIZE;
2549 }
2550 } else {
2551 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2552 p->phys_offset = phys_offset;
2553 p->region_offset = region_offset;
2554 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2555 (phys_offset & IO_MEM_ROMD)) {
2556 phys_offset += TARGET_PAGE_SIZE;
2557 } else {
2558 target_phys_addr_t start_addr2, end_addr2;
2559 int need_subpage = 0;
2560
2561 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2562 end_addr2, need_subpage);
2563
2564 if (need_subpage) {
2565 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2566 &p->phys_offset, IO_MEM_UNASSIGNED,
2567 addr & TARGET_PAGE_MASK);
2568 subpage_register(subpage, start_addr2, end_addr2,
2569 phys_offset, region_offset);
2570 p->region_offset = 0;
2571 }
2572 }
2573 }
2574 region_offset += TARGET_PAGE_SIZE;
2575 addr += TARGET_PAGE_SIZE;
2576 } while (addr != end_addr);
2577
2578 /* since each CPU stores ram addresses in its TLB cache, we must
2579 reset the modified entries */
2580 /* XXX: slow! */
2581 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2582 tlb_flush(env, 1);
2583 }
2584 }
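/* Illustrative sketch (hypothetical device code; 'mr' and the constants are
 * not part of this file): mapping 64 KiB of freshly allocated RAM at guest
 * physical address 0x10000000 could look like
 *
 *     ram_addr_t off = qemu_ram_alloc(0x10000, mr);
 *     cpu_register_physical_memory_log(0x10000000, 0x10000, off, 0, false);
 *
 * For pure RAM the ram offset is passed as phys_offset; an MMIO region
 * would pass the value returned by cpu_register_io_memory() instead. */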
2585
2586 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2587 {
2588 if (kvm_enabled())
2589 kvm_coalesce_mmio_region(addr, size);
2590 }
2591
2592 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2593 {
2594 if (kvm_enabled())
2595 kvm_uncoalesce_mmio_region(addr, size);
2596 }
2597
2598 void qemu_flush_coalesced_mmio_buffer(void)
2599 {
2600 if (kvm_enabled())
2601 kvm_flush_coalesced_mmio_buffer();
2602 }
2603
2604 #if defined(__linux__) && !defined(TARGET_S390X)
2605
2606 #include <sys/vfs.h>
2607
2608 #define HUGETLBFS_MAGIC 0x958458f6
2609
2610 static long gethugepagesize(const char *path)
2611 {
2612 struct statfs fs;
2613 int ret;
2614
2615 do {
2616 ret = statfs(path, &fs);
2617 } while (ret != 0 && errno == EINTR);
2618
2619 if (ret != 0) {
2620 perror(path);
2621 return 0;
2622 }
2623
2624 if (fs.f_type != HUGETLBFS_MAGIC)
2625 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2626
2627 return fs.f_bsize;
2628 }
2629
2630 static void *file_ram_alloc(RAMBlock *block,
2631 ram_addr_t memory,
2632 const char *path)
2633 {
2634 char *filename;
2635 void *area;
2636 int fd;
2637 #ifdef MAP_POPULATE
2638 int flags;
2639 #endif
2640 unsigned long hpagesize;
2641
2642 hpagesize = gethugepagesize(path);
2643 if (!hpagesize) {
2644 return NULL;
2645 }
2646
2647 if (memory < hpagesize) {
2648 return NULL;
2649 }
2650
2651 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2652 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2653 return NULL;
2654 }
2655
2656 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2657 return NULL;
2658 }
2659
2660 fd = mkstemp(filename);
2661 if (fd < 0) {
2662 perror("unable to create backing store for hugepages");
2663 free(filename);
2664 return NULL;
2665 }
2666 unlink(filename);
2667 free(filename);
2668
2669 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2670
2671 /*
2672 * ftruncate is not supported by hugetlbfs on older
2673 * hosts, so don't bother bailing out on errors.
2674 * If anything goes wrong with it under other filesystems,
2675 * mmap will fail.
2676 */
2677 if (ftruncate(fd, memory))
2678 perror("ftruncate");
2679
2680 #ifdef MAP_POPULATE
2681 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2682 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2683 * to sidestep this quirk.
2684 */
2685 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2686 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2687 #else
2688 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2689 #endif
2690 if (area == MAP_FAILED) {
2691 perror("file_ram_alloc: can't mmap RAM pages");
2692 close(fd);
2693 return NULL;
2694 }
2695 block->fd = fd;
2696 return area;
2697 }
2698 #endif
2699
2700 static ram_addr_t find_ram_offset(ram_addr_t size)
2701 {
2702 RAMBlock *block, *next_block;
2703 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2704
2705 if (QLIST_EMPTY(&ram_list.blocks))
2706 return 0;
2707
2708 QLIST_FOREACH(block, &ram_list.blocks, next) {
2709 ram_addr_t end, next = RAM_ADDR_MAX;
2710
2711 end = block->offset + block->length;
2712
2713 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2714 if (next_block->offset >= end) {
2715 next = MIN(next, next_block->offset);
2716 }
2717 }
2718 if (next - end >= size && next - end < mingap) {
2719 offset = end;
2720 mingap = next - end;
2721 }
2722 }
2723
2724 if (offset == RAM_ADDR_MAX) {
2725 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2726 (uint64_t)size);
2727 abort();
2728 }
2729
2730 return offset;
2731 }
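/* Example (illustrative): with blocks at [0, 1M) and [3M, 4M), a 512K
 * request fits both the 2M gap at 1M and the unbounded space after 4M;
 * best fit picks the smaller 2M gap, so the new block lands at offset 1M,
 * keeping the ram address space compact. */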
2732
2733 static ram_addr_t last_ram_offset(void)
2734 {
2735 RAMBlock *block;
2736 ram_addr_t last = 0;
2737
2738 QLIST_FOREACH(block, &ram_list.blocks, next)
2739 last = MAX(last, block->offset + block->length);
2740
2741 return last;
2742 }
2743
2744 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2745 {
2746 RAMBlock *new_block, *block;
2747
2748 new_block = NULL;
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
2750 if (block->offset == addr) {
2751 new_block = block;
2752 break;
2753 }
2754 }
2755 assert(new_block);
2756 assert(!new_block->idstr[0]);
2757
2758 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2759 char *id = dev->parent_bus->info->get_dev_path(dev);
2760 if (id) {
2761 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2762 g_free(id);
2763 }
2764 }
2765 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2766
2767 QLIST_FOREACH(block, &ram_list.blocks, next) {
2768 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2769 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2770 new_block->idstr);
2771 abort();
2772 }
2773 }
2774 }
2775
2776 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2777 MemoryRegion *mr)
2778 {
2779 RAMBlock *new_block;
2780
2781 size = TARGET_PAGE_ALIGN(size);
2782 new_block = g_malloc0(sizeof(*new_block));
2783
2784 new_block->mr = mr;
2785 new_block->offset = find_ram_offset(size);
2786 if (host) {
2787 new_block->host = host;
2788 new_block->flags |= RAM_PREALLOC_MASK;
2789 } else {
2790 if (mem_path) {
2791 #if defined (__linux__) && !defined(TARGET_S390X)
2792 new_block->host = file_ram_alloc(new_block, size, mem_path);
2793 if (!new_block->host) {
2794 new_block->host = qemu_vmalloc(size);
2795 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2796 }
2797 #else
2798 fprintf(stderr, "-mem-path option unsupported\n");
2799 exit(1);
2800 #endif
2801 } else {
2802 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2803 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2804 a system-defined value, which is at least 256GB. Larger systems
2805 have larger values. We put the guest between the end of the data
2806 segment (system break) and this value. We use 32GB as a base to
2807 leave enough room for the system break to grow. */
2808 new_block->host = mmap((void*)0x800000000, size,
2809 PROT_EXEC|PROT_READ|PROT_WRITE,
2810 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2811 if (new_block->host == MAP_FAILED) {
2812 fprintf(stderr, "Allocating RAM failed\n");
2813 abort();
2814 }
2815 #else
2816 if (xen_enabled()) {
2817 xen_ram_alloc(new_block->offset, size, mr);
2818 } else {
2819 new_block->host = qemu_vmalloc(size);
2820 }
2821 #endif
2822 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2823 }
2824 }
2825 new_block->length = size;
2826
2827 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2828
2829 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2830 last_ram_offset() >> TARGET_PAGE_BITS);
2831 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2832 0xff, size >> TARGET_PAGE_BITS);
2833
2834 if (kvm_enabled())
2835 kvm_setup_guest_memory(new_block->host, size);
2836
2837 return new_block->offset;
2838 }
2839
2840 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2841 {
2842 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2843 }
2844
2845 void qemu_ram_free_from_ptr(ram_addr_t addr)
2846 {
2847 RAMBlock *block;
2848
2849 QLIST_FOREACH(block, &ram_list.blocks, next) {
2850 if (addr == block->offset) {
2851 QLIST_REMOVE(block, next);
2852 g_free(block);
2853 return;
2854 }
2855 }
2856 }
2857
2858 void qemu_ram_free(ram_addr_t addr)
2859 {
2860 RAMBlock *block;
2861
2862 QLIST_FOREACH(block, &ram_list.blocks, next) {
2863 if (addr == block->offset) {
2864 QLIST_REMOVE(block, next);
2865 if (block->flags & RAM_PREALLOC_MASK) {
2866 ;
2867 } else if (mem_path) {
2868 #if defined (__linux__) && !defined(TARGET_S390X)
2869 if (block->fd) {
2870 munmap(block->host, block->length);
2871 close(block->fd);
2872 } else {
2873 qemu_vfree(block->host);
2874 }
2875 #else
2876 abort();
2877 #endif
2878 } else {
2879 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2880 munmap(block->host, block->length);
2881 #else
2882 if (xen_enabled()) {
2883 xen_invalidate_map_cache_entry(block->host);
2884 } else {
2885 qemu_vfree(block->host);
2886 }
2887 #endif
2888 }
2889 g_free(block);
2890 return;
2891 }
2892 }
2893
2894 }
2895
2896 #ifndef _WIN32
2897 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2898 {
2899 RAMBlock *block;
2900 ram_addr_t offset;
2901 int flags;
2902 void *area, *vaddr;
2903
2904 QLIST_FOREACH(block, &ram_list.blocks, next) {
2905 offset = addr - block->offset;
2906 if (offset < block->length) {
2907 vaddr = block->host + offset;
2908 if (block->flags & RAM_PREALLOC_MASK) {
2909 ;
2910 } else {
2911 flags = MAP_FIXED;
2912 munmap(vaddr, length);
2913 if (mem_path) {
2914 #if defined(__linux__) && !defined(TARGET_S390X)
2915 if (block->fd) {
2916 #ifdef MAP_POPULATE
2917 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2918 MAP_PRIVATE;
2919 #else
2920 flags |= MAP_PRIVATE;
2921 #endif
2922 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2923 flags, block->fd, offset);
2924 } else {
2925 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2926 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2927 flags, -1, 0);
2928 }
2929 #else
2930 abort();
2931 #endif
2932 } else {
2933 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2934 flags |= MAP_SHARED | MAP_ANONYMOUS;
2935 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2936 flags, -1, 0);
2937 #else
2938 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2939 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2940 flags, -1, 0);
2941 #endif
2942 }
2943 if (area != vaddr) {
2944 fprintf(stderr, "Could not remap addr: "
2945 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2946 length, addr);
2947 exit(1);
2948 }
2949 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2950 }
2951 return;
2952 }
2953 }
2954 }
2955 #endif /* !_WIN32 */
2956
2957 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2958 With the exception of the softmmu code in this file, this should
2959 only be used for local memory (e.g. video ram) that the device owns,
2960 and knows it isn't going to access beyond the end of the block.
2961
2962 It should not be used for general purpose DMA.
2963 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2964 */
2965 void *qemu_get_ram_ptr(ram_addr_t addr)
2966 {
2967 RAMBlock *block;
2968
2969 QLIST_FOREACH(block, &ram_list.blocks, next) {
2970 if (addr - block->offset < block->length) {
2971 /* Move this entry to the start of the list. */
2972 if (block != QLIST_FIRST(&ram_list.blocks)) {
2973 QLIST_REMOVE(block, next);
2974 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2975 }
2976 if (xen_enabled()) {
2977 /* We need to check if the requested address is in the RAM
2978 * because we don't want to map the entire memory in QEMU.
2979 * In that case just map until the end of the page.
2980 */
2981 if (block->offset == 0) {
2982 return xen_map_cache(addr, 0, 0);
2983 } else if (block->host == NULL) {
2984 block->host =
2985 xen_map_cache(block->offset, block->length, 1);
2986 }
2987 }
2988 return block->host + (addr - block->offset);
2989 }
2990 }
2991
2992 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2993 abort();
2994
2995 return NULL;
2996 }
2997
2998 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2999 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3000 */
3001 void *qemu_safe_ram_ptr(ram_addr_t addr)
3002 {
3003 RAMBlock *block;
3004
3005 QLIST_FOREACH(block, &ram_list.blocks, next) {
3006 if (addr - block->offset < block->length) {
3007 if (xen_enabled()) {
3008 /* We need to check if the requested address is in the RAM
3009 * because we don't want to map the entire memory in QEMU.
3010 * In that case just map until the end of the page.
3011 */
3012 if (block->offset == 0) {
3013 return xen_map_cache(addr, 0, 0);
3014 } else if (block->host == NULL) {
3015 block->host =
3016 xen_map_cache(block->offset, block->length, 1);
3017 }
3018 }
3019 return block->host + (addr - block->offset);
3020 }
3021 }
3022
3023 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3024 abort();
3025
3026 return NULL;
3027 }
3028
3029 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3030 * but takes a size argument */
3031 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3032 {
3033 if (*size == 0) {
3034 return NULL;
3035 }
3036 if (xen_enabled()) {
3037 return xen_map_cache(addr, *size, 1);
3038 } else {
3039 RAMBlock *block;
3040
3041 QLIST_FOREACH(block, &ram_list.blocks, next) {
3042 if (addr - block->offset < block->length) {
3043 if (addr - block->offset + *size > block->length)
3044 *size = block->length - addr + block->offset;
3045 return block->host + (addr - block->offset);
3046 }
3047 }
3048
3049 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3050 abort();
3051 }
3052 }
3053
3054 void qemu_put_ram_ptr(void *addr)
3055 {
3056 trace_qemu_put_ram_ptr(addr);
3057 }
3058
3059 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3060 {
3061 RAMBlock *block;
3062 uint8_t *host = ptr;
3063
3064 if (xen_enabled()) {
3065 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3066 return 0;
3067 }
3068
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 /* This case happens when the block is not mapped. */
3071 if (block->host == NULL) {
3072 continue;
3073 }
3074 if (host - block->host < block->length) {
3075 *ram_addr = block->offset + (host - block->host);
3076 return 0;
3077 }
3078 }
3079
3080 return -1;
3081 }
3082
3083 /* Some of the softmmu routines need to translate from a host pointer
3084 (typically a TLB entry) back to a ram offset. */
3085 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3086 {
3087 ram_addr_t ram_addr;
3088
3089 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3090 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3091 abort();
3092 }
3093 return ram_addr;
3094 }
3095
3096 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3097 {
3098 #ifdef DEBUG_UNASSIGNED
3099 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3100 #endif
3101 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3102 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
3103 #endif
3104 return 0;
3105 }
3106
3107 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3108 {
3109 #ifdef DEBUG_UNASSIGNED
3110 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3111 #endif
3112 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3113 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
3114 #endif
3115 return 0;
3116 }
3117
3118 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3119 {
3120 #ifdef DEBUG_UNASSIGNED
3121 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3122 #endif
3123 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3124 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
3125 #endif
3126 return 0;
3127 }
3128
3129 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3130 {
3131 #ifdef DEBUG_UNASSIGNED
3132 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3133 #endif
3134 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3135 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
3136 #endif
3137 }
3138
3139 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3140 {
3141 #ifdef DEBUG_UNASSIGNED
3142 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3143 #endif
3144 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3145 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
3146 #endif
3147 }
3148
3149 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3150 {
3151 #ifdef DEBUG_UNASSIGNED
3152 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3153 #endif
3154 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3155 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
3156 #endif
3157 }
3158
3159 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3160 unassigned_mem_readb,
3161 unassigned_mem_readw,
3162 unassigned_mem_readl,
3163 };
3164
3165 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3166 unassigned_mem_writeb,
3167 unassigned_mem_writew,
3168 unassigned_mem_writel,
3169 };
3170
3171 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3172 uint32_t val)
3173 {
3174 int dirty_flags;
3175 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3176 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3177 #if !defined(CONFIG_USER_ONLY)
3178 tb_invalidate_phys_page_fast(ram_addr, 1);
3179 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3180 #endif
3181 }
3182 stb_p(qemu_get_ram_ptr(ram_addr), val);
3183 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3184 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3185 /* we remove the notdirty callback only if the code has been
3186 flushed */
3187 if (dirty_flags == 0xff)
3188 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3189 }
3190
3191 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3192 uint32_t val)
3193 {
3194 int dirty_flags;
3195 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3196 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3197 #if !defined(CONFIG_USER_ONLY)
3198 tb_invalidate_phys_page_fast(ram_addr, 2);
3199 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3200 #endif
3201 }
3202 stw_p(qemu_get_ram_ptr(ram_addr), val);
3203 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3204 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3205 /* we remove the notdirty callback only if the code has been
3206 flushed */
3207 if (dirty_flags == 0xff)
3208 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3209 }
3210
3211 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3212 uint32_t val)
3213 {
3214 int dirty_flags;
3215 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3216 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3217 #if !defined(CONFIG_USER_ONLY)
3218 tb_invalidate_phys_page_fast(ram_addr, 4);
3219 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3220 #endif
3221 }
3222 stl_p(qemu_get_ram_ptr(ram_addr), val);
3223 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3224 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3225 /* we remove the notdirty callback only if the code has been
3226 flushed */
3227 if (dirty_flags == 0xff)
3228 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3229 }
3230
3231 static CPUReadMemoryFunc * const error_mem_read[3] = {
3232 NULL, /* never used */
3233 NULL, /* never used */
3234 NULL, /* never used */
3235 };
3236
3237 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3238 notdirty_mem_writeb,
3239 notdirty_mem_writew,
3240 notdirty_mem_writel,
3241 };
3242
3243 /* Generate a debug exception if a watchpoint has been hit. */
3244 static void check_watchpoint(int offset, int len_mask, int flags)
3245 {
3246 CPUState *env = cpu_single_env;
3247 target_ulong pc, cs_base;
3248 TranslationBlock *tb;
3249 target_ulong vaddr;
3250 CPUWatchpoint *wp;
3251 int cpu_flags;
3252
3253 if (env->watchpoint_hit) {
3254 /* We re-entered the check after replacing the TB. Now raise
3255 * the debug interrupt so that it will trigger after the
3256 * current instruction. */
3257 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3258 return;
3259 }
3260 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3261 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3262 if ((vaddr == (wp->vaddr & len_mask) ||
3263 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3264 wp->flags |= BP_WATCHPOINT_HIT;
3265 if (!env->watchpoint_hit) {
3266 env->watchpoint_hit = wp;
3267 tb = tb_find_pc(env->mem_io_pc);
3268 if (!tb) {
3269 cpu_abort(env, "check_watchpoint: could not find TB for "
3270 "pc=%p", (void *)env->mem_io_pc);
3271 }
3272 cpu_restore_state(tb, env, env->mem_io_pc);
3273 tb_phys_invalidate(tb, -1);
3274 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3275 env->exception_index = EXCP_DEBUG;
3276 } else {
3277 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3278 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3279 }
3280 cpu_resume_from_signal(env, NULL);
3281 }
3282 } else {
3283 wp->flags &= ~BP_WATCHPOINT_HIT;
3284 }
3285 }
3286 }
3287
3288 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3289 so these check for a hit then pass through to the normal out-of-line
3290 phys routines. */
3291 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3292 {
3293 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3294 return ldub_phys(addr);
3295 }
3296
3297 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3298 {
3299 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3300 return lduw_phys(addr);
3301 }
3302
3303 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3304 {
3305 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3306 return ldl_phys(addr);
3307 }
3308
3309 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3310 uint32_t val)
3311 {
3312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3313 stb_phys(addr, val);
3314 }
3315
3316 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3317 uint32_t val)
3318 {
3319 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3320 stw_phys(addr, val);
3321 }
3322
3323 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3324 uint32_t val)
3325 {
3326 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3327 stl_phys(addr, val);
3328 }
3329
3330 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3331 watch_mem_readb,
3332 watch_mem_readw,
3333 watch_mem_readl,
3334 };
3335
3336 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3337 watch_mem_writeb,
3338 watch_mem_writew,
3339 watch_mem_writel,
3340 };
3341
3342 static inline uint32_t subpage_readlen (subpage_t *mmio,
3343 target_phys_addr_t addr,
3344 unsigned int len)
3345 {
3346 unsigned int idx = SUBPAGE_IDX(addr);
3347 #if defined(DEBUG_SUBPAGE)
3348 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3349 mmio, len, addr, idx);
3350 #endif
3351
3352 addr += mmio->region_offset[idx];
3353 idx = mmio->sub_io_index[idx];
3354 return io_mem_read(idx, addr, 1 << len);
3355 }
3356
3357 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3358 uint32_t value, unsigned int len)
3359 {
3360 unsigned int idx = SUBPAGE_IDX(addr);
3361 #if defined(DEBUG_SUBPAGE)
3362 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3363 __func__, mmio, len, addr, idx, value);
3364 #endif
3365
3366 addr += mmio->region_offset[idx];
3367 idx = mmio->sub_io_index[idx];
3368 io_mem_write(idx, addr, value, 1 << len);
3369 }
3370
3371 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3372 {
3373 return subpage_readlen(opaque, addr, 0);
3374 }
3375
3376 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3377 uint32_t value)
3378 {
3379 subpage_writelen(opaque, addr, value, 0);
3380 }
3381
3382 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3383 {
3384 return subpage_readlen(opaque, addr, 1);
3385 }
3386
3387 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3388 uint32_t value)
3389 {
3390 subpage_writelen(opaque, addr, value, 1);
3391 }
3392
3393 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3394 {
3395 return subpage_readlen(opaque, addr, 2);
3396 }
3397
3398 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3399 uint32_t value)
3400 {
3401 subpage_writelen(opaque, addr, value, 2);
3402 }
3403
3404 static CPUReadMemoryFunc * const subpage_read[] = {
3405 &subpage_readb,
3406 &subpage_readw,
3407 &subpage_readl,
3408 };
3409
3410 static CPUWriteMemoryFunc * const subpage_write[] = {
3411 &subpage_writeb,
3412 &subpage_writew,
3413 &subpage_writel,
3414 };
3415
3416 static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3417 {
3418 ram_addr_t raddr = addr;
3419 void *ptr = qemu_get_ram_ptr(raddr);
3420 return ldub_p(ptr);
3421 }
3422
3423 static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3424 uint32_t value)
3425 {
3426 ram_addr_t raddr = addr;
3427 void *ptr = qemu_get_ram_ptr(raddr);
3428 stb_p(ptr, value);
3429 }
3430
3431 static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3432 {
3433 ram_addr_t raddr = addr;
3434 void *ptr = qemu_get_ram_ptr(raddr);
3435 return lduw_p(ptr);
3436 }
3437
3438 static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3439 uint32_t value)
3440 {
3441 ram_addr_t raddr = addr;
3442 void *ptr = qemu_get_ram_ptr(raddr);
3443 stw_p(ptr, value);
3444 }
3445
3446 static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3447 {
3448 ram_addr_t raddr = addr;
3449 void *ptr = qemu_get_ram_ptr(raddr);
3450 return ldl_p(ptr);
3451 }
3452
3453 static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3454 uint32_t value)
3455 {
3456 ram_addr_t raddr = addr;
3457 void *ptr = qemu_get_ram_ptr(raddr);
3458 stl_p(ptr, value);
3459 }
3460
3461 static CPUReadMemoryFunc * const subpage_ram_read[] = {
3462 &subpage_ram_readb,
3463 &subpage_ram_readw,
3464 &subpage_ram_readl,
3465 };
3466
3467 static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3468 &subpage_ram_writeb,
3469 &subpage_ram_writew,
3470 &subpage_ram_writel,
3471 };
3472
3473 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3474 ram_addr_t memory, ram_addr_t region_offset)
3475 {
3476 int idx, eidx;
3477
3478 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3479 return -1;
3480 idx = SUBPAGE_IDX(start);
3481 eidx = SUBPAGE_IDX(end);
3482 #if defined(DEBUG_SUBPAGE)
3483 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3484 mmio, start, end, idx, eidx, memory);
3485 #endif
3486 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3487 memory = IO_MEM_SUBPAGE_RAM;
3488 }
3489 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3490 for (; idx <= eidx; idx++) {
3491 mmio->sub_io_index[idx] = memory;
3492 mmio->region_offset[idx] = region_offset;
3493 }
3494
3495 return 0;
3496 }
3497
3498 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3499 ram_addr_t orig_memory,
3500 ram_addr_t region_offset)
3501 {
3502 subpage_t *mmio;
3503 int subpage_memory;
3504
3505 mmio = g_malloc0(sizeof(subpage_t));
3506
3507 mmio->base = base;
3508 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3509 #if defined(DEBUG_SUBPAGE)
3510 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3511 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3512 #endif
3513 *phys = subpage_memory | IO_MEM_SUBPAGE;
3514 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3515
3516 return mmio;
3517 }
3518
3519 static int get_free_io_mem_idx(void)
3520 {
3521 int i;
3522
3523 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3524 if (!io_mem_used[i]) {
3525 io_mem_used[i] = 1;
3526 return i;
3527 }
3528 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3529 return -1;
3530 }
3531
3532 /* mem_read and mem_write are arrays of functions containing the
3533 functions to access byte (index 0), word (index 1) and dword (index
3534 2). Functions can be omitted with a NULL function pointer.
3535 If io_index is non-zero, the corresponding io zone is
3536 modified. If it is zero, a new io zone is allocated. The return
3537 value can be used with cpu_register_physical_memory(); -1 is
3538 returned on error. */
3539 static int cpu_register_io_memory_fixed(int io_index,
3540 CPUReadMemoryFunc * const *mem_read,
3541 CPUWriteMemoryFunc * const *mem_write,
3542 void *opaque)
3543 {
3544 int i;
3545
3546 if (io_index <= 0) {
3547 io_index = get_free_io_mem_idx();
3548 if (io_index == -1)
3549 return io_index;
3550 } else {
3551 io_index >>= IO_MEM_SHIFT;
3552 if (io_index >= IO_MEM_NB_ENTRIES)
3553 return -1;
3554 }
3555
3556 for (i = 0; i < 3; ++i) {
3557 _io_mem_read[io_index][i]
3558 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3559 }
3560 for (i = 0; i < 3; ++i) {
3561 _io_mem_write[io_index][i]
3562 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3563 }
3564 io_mem_opaque[io_index] = opaque;
3565
3566 return (io_index << IO_MEM_SHIFT);
3567 }
3568
3569 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3570 CPUWriteMemoryFunc * const *mem_write,
3571 void *opaque)
3572 {
3573 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3574 }
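/* Usage sketch (hypothetical device; the my_read/my_write handlers and 's'
 * are not defined in this file). Handlers are indexed by access size:
 *
 *     static CPUReadMemoryFunc * const my_read[3] = {
 *         my_readb, my_readw, my_readl,    // byte, word, dword
 *     };
 *     static CPUWriteMemoryFunc * const my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *     int io = cpu_register_io_memory(my_read, my_write, s);
 *
 * The returned value is then used as the phys_offset of an I/O page when
 * registering physical memory, as described before
 * cpu_register_io_memory_fixed(). */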
3575
3576 void cpu_unregister_io_memory(int io_table_address)
3577 {
3578 int i;
3579 int io_index = io_table_address >> IO_MEM_SHIFT;
3580
3581 for (i = 0; i < 3; i++) {
3582 _io_mem_read[io_index][i] = unassigned_mem_read[i];
3583 _io_mem_write[io_index][i] = unassigned_mem_write[i];
3584 }
3585 io_mem_opaque[io_index] = NULL;
3586 io_mem_used[io_index] = 0;
3587 }
3588
3589 static void io_mem_init(void)
3590 {
3591 int i;
3592
3593 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3594 unassigned_mem_write, NULL);
3595 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3596 unassigned_mem_write, NULL);
3597 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3598 notdirty_mem_write, NULL);
3599 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3600 subpage_ram_write, NULL);
3601 for (i = 0; i < 5; i++)
3602 io_mem_used[i] = 1;
3603
3604 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3605 watch_mem_write, NULL);
3606 }
3607
3608 static void memory_map_init(void)
3609 {
3610 system_memory = g_malloc(sizeof(*system_memory));
3611 memory_region_init(system_memory, "system", INT64_MAX);
3612 set_system_memory_map(system_memory);
3613
3614 system_io = g_malloc(sizeof(*system_io));
3615 memory_region_init(system_io, "io", 65536);
3616 set_system_io_map(system_io);
3617 }
3618
3619 MemoryRegion *get_system_memory(void)
3620 {
3621 return system_memory;
3622 }
3623
3624 MemoryRegion *get_system_io(void)
3625 {
3626 return system_io;
3627 }
3628
3629 #endif /* !defined(CONFIG_USER_ONLY) */
3630
3631 /* physical memory access (slow version, mainly for debug) */
3632 #if defined(CONFIG_USER_ONLY)
3633 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3634 uint8_t *buf, int len, int is_write)
3635 {
3636 int l, flags;
3637 target_ulong page;
3638 void * p;
3639
3640 while (len > 0) {
3641 page = addr & TARGET_PAGE_MASK;
3642 l = (page + TARGET_PAGE_SIZE) - addr;
3643 if (l > len)
3644 l = len;
3645 flags = page_get_flags(page);
3646 if (!(flags & PAGE_VALID))
3647 return -1;
3648 if (is_write) {
3649 if (!(flags & PAGE_WRITE))
3650 return -1;
3651 /* XXX: this code should not depend on lock_user */
3652 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3653 return -1;
3654 memcpy(p, buf, l);
3655 unlock_user(p, addr, l);
3656 } else {
3657 if (!(flags & PAGE_READ))
3658 return -1;
3659 /* XXX: this code should not depend on lock_user */
3660 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3661 return -1;
3662 memcpy(buf, p, l);
3663 unlock_user(p, addr, 0);
3664 }
3665 len -= l;
3666 buf += l;
3667 addr += l;
3668 }
3669 return 0;
3670 }
3671
3672 #else
3673 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3674 int len, int is_write)
3675 {
3676 int l, io_index;
3677 uint8_t *ptr;
3678 uint32_t val;
3679 target_phys_addr_t page;
3680 ram_addr_t pd;
3681 PhysPageDesc p;
3682
3683 while (len > 0) {
3684 page = addr & TARGET_PAGE_MASK;
3685 l = (page + TARGET_PAGE_SIZE) - addr;
3686 if (l > len)
3687 l = len;
3688 p = phys_page_find(page >> TARGET_PAGE_BITS);
3689 pd = p.phys_offset;
3690
3691 if (is_write) {
3692 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3693 target_phys_addr_t addr1;
3694 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3695 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3696 /* XXX: could force cpu_single_env to NULL to avoid
3697 potential bugs */
3698 if (l >= 4 && ((addr1 & 3) == 0)) {
3699 /* 32 bit write access */
3700 val = ldl_p(buf);
3701 io_mem_write(io_index, addr1, val, 4);
3702 l = 4;
3703 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3704 /* 16 bit write access */
3705 val = lduw_p(buf);
3706 io_mem_write(io_index, addr1, val, 2);
3707 l = 2;
3708 } else {
3709 /* 8 bit write access */
3710 val = ldub_p(buf);
3711 io_mem_write(io_index, addr1, val, 1);
3712 l = 1;
3713 }
3714 } else {
3715 ram_addr_t addr1;
3716 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3717 /* RAM case */
3718 ptr = qemu_get_ram_ptr(addr1);
3719 memcpy(ptr, buf, l);
3720 if (!cpu_physical_memory_is_dirty(addr1)) {
3721 /* invalidate code */
3722 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3723 /* set dirty bit */
3724 cpu_physical_memory_set_dirty_flags(
3725 addr1, (0xff & ~CODE_DIRTY_FLAG));
3726 }
3727 qemu_put_ram_ptr(ptr);
3728 }
3729 } else {
3730 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3731 !(pd & IO_MEM_ROMD)) {
3732 target_phys_addr_t addr1;
3733 /* I/O case */
3734 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3735 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3736 if (l >= 4 && ((addr1 & 3) == 0)) {
3737 /* 32 bit read access */
3738 val = io_mem_read(io_index, addr1, 4);
3739 stl_p(buf, val);
3740 l = 4;
3741 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3742 /* 16 bit read access */
3743 val = io_mem_read(io_index, addr1, 2);
3744 stw_p(buf, val);
3745 l = 2;
3746 } else {
3747 /* 8 bit read access */
3748 val = io_mem_read(io_index, addr1, 1);
3749 stb_p(buf, val);
3750 l = 1;
3751 }
3752 } else {
3753 /* RAM case */
3754 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3755 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3756 qemu_put_ram_ptr(ptr);
3757 }
3758 }
3759 len -= l;
3760 buf += l;
3761 addr += l;
3762 }
3763 }
3764
3765 /* used for ROM loading: can write in RAM and ROM */
3766 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3767 const uint8_t *buf, int len)
3768 {
3769 int l;
3770 uint8_t *ptr;
3771 target_phys_addr_t page;
3772 unsigned long pd;
3773 PhysPageDesc p;
3774
3775 while (len > 0) {
3776 page = addr & TARGET_PAGE_MASK;
3777 l = (page + TARGET_PAGE_SIZE) - addr;
3778 if (l > len)
3779 l = len;
3780 p = phys_page_find(page >> TARGET_PAGE_BITS);
3781 pd = p.phys_offset;
3782
3783 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3784 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3785 !(pd & IO_MEM_ROMD)) {
3786 /* do nothing */
3787 } else {
3788 unsigned long addr1;
3789 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3790 /* ROM/RAM case */
3791 ptr = qemu_get_ram_ptr(addr1);
3792 memcpy(ptr, buf, l);
3793 qemu_put_ram_ptr(ptr);
3794 }
3795 len -= l;
3796 buf += l;
3797 addr += l;
3798 }
3799 }
3800
3801 typedef struct {
3802 void *buffer;
3803 target_phys_addr_t addr;
3804 target_phys_addr_t len;
3805 } BounceBuffer;
3806
3807 static BounceBuffer bounce;
3808
3809 typedef struct MapClient {
3810 void *opaque;
3811 void (*callback)(void *opaque);
3812 QLIST_ENTRY(MapClient) link;
3813 } MapClient;
3814
3815 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3816 = QLIST_HEAD_INITIALIZER(map_client_list);
3817
3818 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3819 {
3820 MapClient *client = g_malloc(sizeof(*client));
3821
3822 client->opaque = opaque;
3823 client->callback = callback;
3824 QLIST_INSERT_HEAD(&map_client_list, client, link);
3825 return client;
3826 }
3827
3828 void cpu_unregister_map_client(void *_client)
3829 {
3830 MapClient *client = (MapClient *)_client;
3831
3832 QLIST_REMOVE(client, link);
3833 g_free(client);
3834 }
3835
3836 static void cpu_notify_map_clients(void)
3837 {
3838 MapClient *client;
3839
3840 while (!QLIST_EMPTY(&map_client_list)) {
3841 client = QLIST_FIRST(&map_client_list);
3842 client->callback(client->opaque);
3843 cpu_unregister_map_client(client);
3844 }
3845 }
3846
3847 /* Map a physical memory region into a host virtual address.
3848 * May map a subset of the requested range, given by and returned in *plen.
3849 * May return NULL if resources needed to perform the mapping are exhausted.
3850 * Use only for reads OR writes - not for read-modify-write operations.
3851 * Use cpu_register_map_client() to know when retrying the map operation is
3852 * likely to succeed.
3853 */
3854 void *cpu_physical_memory_map(target_phys_addr_t addr,
3855 target_phys_addr_t *plen,
3856 int is_write)
3857 {
3858 target_phys_addr_t len = *plen;
3859 target_phys_addr_t todo = 0;
3860 int l;
3861 target_phys_addr_t page;
3862 unsigned long pd;
3863 PhysPageDesc p;
3864 ram_addr_t raddr = RAM_ADDR_MAX;
3865 ram_addr_t rlen;
3866 void *ret;
3867
3868 while (len > 0) {
3869 page = addr & TARGET_PAGE_MASK;
3870 l = (page + TARGET_PAGE_SIZE) - addr;
3871 if (l > len)
3872 l = len;
3873 p = phys_page_find(page >> TARGET_PAGE_BITS);
3874 pd = p.phys_offset;
3875
3876 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3877 if (todo || bounce.buffer) {
3878 break;
3879 }
3880 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3881 bounce.addr = addr;
3882 bounce.len = l;
3883 if (!is_write) {
3884 cpu_physical_memory_read(addr, bounce.buffer, l);
3885 }
3886
3887 *plen = l;
3888 return bounce.buffer;
3889 }
3890 if (!todo) {
3891 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3892 }
3893
3894 len -= l;
3895 addr += l;
3896 todo += l;
3897 }
3898 rlen = todo;
3899 ret = qemu_ram_ptr_length(raddr, &rlen);
3900 *plen = rlen;
3901 return ret;
3902 }
3903
3904 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3905 * Will also mark the memory as dirty if is_write == 1. access_len gives
3906 * the amount of memory that was actually read or written by the caller.
3907 */
3908 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3909 int is_write, target_phys_addr_t access_len)
3910 {
3911 if (buffer != bounce.buffer) {
3912 if (is_write) {
3913 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3914 while (access_len) {
3915 unsigned l;
3916 l = TARGET_PAGE_SIZE;
3917 if (l > access_len)
3918 l = access_len;
3919 if (!cpu_physical_memory_is_dirty(addr1)) {
3920 /* invalidate code */
3921 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3922 /* set dirty bit */
3923 cpu_physical_memory_set_dirty_flags(
3924 addr1, (0xff & ~CODE_DIRTY_FLAG));
3925 }
3926 addr1 += l;
3927 access_len -= l;
3928 }
3929 }
3930 if (xen_enabled()) {
3931 xen_invalidate_map_cache_entry(buffer);
3932 }
3933 return;
3934 }
3935 if (is_write) {
3936 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3937 }
3938 qemu_vfree(bounce.buffer);
3939 bounce.buffer = NULL;
3940 cpu_notify_map_clients();
3941 }
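/* Usage sketch (hypothetical DMA path; 'retry_cb', 's' and 'size' are
 * illustrative only, not part of this file):
 *
 *     target_phys_addr_t len = size;
 *     void *p = cpu_physical_memory_map(addr, &len, 1);
 *     if (!p) {
 *         cpu_register_map_client(s, retry_cb);   // bounce buffer busy
 *         return;
 *     }
 *     ... produce up to 'len' bytes into p ...
 *     cpu_physical_memory_unmap(p, len, 1, len);
 *
 * Note that len may come back smaller than requested, so callers must be
 * prepared to loop. */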

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
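
/* Illustrative example (device base and register offset hypothetical):
 * a register documented as 32-bit little-endian is read with the _le
 * variant, which byte-swaps exactly when the build is big-endian, so
 * the caller sees the device's value regardless of target endianness.
 *
 *     uint32_t status = ldl_le_phys(dev_base + STATUS_REG_OFFSET);
 */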

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the access is split into two 32-bit reads */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        /* widen to 64 bits before shifting, so the high word cannot be
           lost if io_mem_read() returns a 32-bit type */
        val = (uint64_t)io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= (uint64_t)io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;    /* 16-bit load; match the uint32_t return type */
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
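
/* Typical use (illustrative; the helper and PTE_* flags below are
 * hypothetical): a target MMU walk that sets accessed/dirty bits in a
 * guest PTE uses the _notdirty store so this emulator bookkeeping write
 * is not itself counted as a guest modification of the page.
 *
 *     static void update_pte_flags(target_phys_addr_t pte_addr, uint32_t pte)
 *     {
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED | PTE_DIRTY);
 *     }
 */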

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
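
/* Illustrative counterpart to the loads above (names hypothetical): a
 * 64-bit little-endian descriptor pointer is written with the _le
 * variant, keeping the byte order fixed no matter what the target's
 * native endianness is.
 *
 *     stq_le_phys(ring_base + slot * 8, desc_gpa);
 */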

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
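
/* Sketch of a debugger-style caller (env_pc() and report_unmapped() are
 * hypothetical): translation goes through the guest's page tables one
 * page at a time, so a range touching an unmapped page fails with -1.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, env_pc(env), insn, sizeof(insn), 0) < 0) {
 *         report_unmapped(env_pc(env));
 *     }
 */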
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
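
/* Worked example of the icount arithmetic above (hypothetical numbers):
 * with tb->icount = 5 and icount_decr.u16.low reading 10 at the fault,
 * n starts as 10 + 5 = 15. If cpu_restore_state() adjusts low to 12 to
 * reflect the instructions that actually ran, then n = 15 - 12 = 3
 * instructions completed before the I/O access; n++ yields a
 * 4-instruction replacement TB whose last insn, flagged via CF_LAST_IO,
 * is the I/O access itself.
 */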

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double)(code_gen_ptr - code_gen_buffer) / target_code_size
                    : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
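
/* Worked reading of the expansion ratio above (hypothetical numbers):
 * 40 MB of generated host code covering 10 MB of guest code across
 * 50000 TBs reports an avg target size of ~209 bytes, an avg host size
 * of ~838 bytes, and an expansion ratio of 4.0.
 */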

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
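
/* Each inclusion above instantiates the softmmu accessors for a
 * (1 << SHIFT)-byte access size, i.e. 1, 2, 4 and 8 bytes. With
 * MMUSUFFIX set to _cmmu and SOFTMMU_CODE_ACCESS defined, these are the
 * code-fetch variants used by the translator rather than the data-access
 * helpers, and GETPC() is NULL because there is no generated-code return
 * address to unwind from here.
 */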

#undef env

#endif