1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
59
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
62
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
67
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
71
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
74
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
79
80 #define SMC_BITMAP_USE_THRESHOLD 10
81
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92    section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
115
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
120
121 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 static MemoryRegion io_mem_subpage_ram;
123
124 #endif
125
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 DEFINE_TLS(CPUState *,cpu_single_env);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138 /* in order to optimize self-modifying code handling, we count the number
139    of code write accesses to a given page; past a threshold a code bitmap is used */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144 #endif
145 } PageDesc;
146
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 #else
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
154 #endif
155 #else
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
157 #endif
158
159 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_BITS 10
161 #define L2_SIZE (1 << L2_BITS)
162
163 #define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165
166 /* The bits remaining after N lower levels of page tables. */
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169
170 #if V_L1_BITS_REM < 4
171 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172 #else
173 #define V_L1_BITS V_L1_BITS_REM
174 #endif
175
176 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
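/* Illustrative example (not from the original source): with TARGET_PAGE_BITS
   == 12 and L1_MAP_ADDR_SPACE_BITS == 36, 24 bits of page index remain.
   V_L1_BITS_REM == 24 % 10 == 4, which is >= 4, so the top level keeps 4 bits
   (V_L1_SIZE == 16, V_L1_SHIFT == 20) and two 10-bit L2 levels sit below it.
   Assuming TARGET_PHYS_ADDR_SPACE_BITS is also 36, the physical map likewise
   has P_L2_LEVELS == ((36 - 12 - 1) / 10) + 1 == 3 levels. */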
179
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_size;
182 unsigned long qemu_host_page_mask;
183
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map[V_L1_SIZE];
187
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193 } PhysPageDesc;
194
195 typedef struct PhysPageEntry PhysPageEntry;
196
197 struct PhysPageEntry {
198 union {
199 PhysPageDesc leaf;
200 PhysPageEntry *node;
201 } u;
202 };
203
204 /* This is a multi-level map on the physical address space.
205 The bottom level has pointers to PhysPageDesc. */
206 static PhysPageEntry phys_map;
207
208 static void io_mem_init(void);
209 static void memory_map_init(void);
210
211 /* io memory support */
212 MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
213 static char io_mem_used[IO_MEM_NB_ENTRIES];
214 static MemoryRegion io_mem_watch;
215 #endif
216
217 /* log support */
218 #ifdef WIN32
219 static const char *logfilename = "qemu.log";
220 #else
221 static const char *logfilename = "/tmp/qemu.log";
222 #endif
223 FILE *logfile;
224 int loglevel;
225 static int log_append = 0;
226
227 /* statistics */
228 #if !defined(CONFIG_USER_ONLY)
229 static int tlb_flush_count;
230 #endif
231 static int tb_flush_count;
232 static int tb_phys_invalidate_count;
233
234 #ifdef _WIN32
235 static void map_exec(void *addr, long size)
236 {
237 DWORD old_protect;
238 VirtualProtect(addr, size,
239 PAGE_EXECUTE_READWRITE, &old_protect);
240
241 }
242 #else
243 static void map_exec(void *addr, long size)
244 {
245 unsigned long start, end, page_size;
246
247 page_size = getpagesize();
248 start = (unsigned long)addr;
249 start &= ~(page_size - 1);
250
251 end = (unsigned long)addr + size;
252 end += page_size - 1;
253 end &= ~(page_size - 1);
254
255 mprotect((void *)start, end - start,
256 PROT_READ | PROT_WRITE | PROT_EXEC);
257 }
258 #endif
259
260 static void page_init(void)
261 {
262     /* NOTE: we can always assume that qemu_host_page_size >=
263 TARGET_PAGE_SIZE */
264 #ifdef _WIN32
265 {
266 SYSTEM_INFO system_info;
267
268 GetSystemInfo(&system_info);
269 qemu_real_host_page_size = system_info.dwPageSize;
270 }
271 #else
272 qemu_real_host_page_size = getpagesize();
273 #endif
274 if (qemu_host_page_size == 0)
275 qemu_host_page_size = qemu_real_host_page_size;
276 if (qemu_host_page_size < TARGET_PAGE_SIZE)
277 qemu_host_page_size = TARGET_PAGE_SIZE;
278 qemu_host_page_mask = ~(qemu_host_page_size - 1);
279
280 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
281 {
282 #ifdef HAVE_KINFO_GETVMMAP
283 struct kinfo_vmentry *freep;
284 int i, cnt;
285
286 freep = kinfo_getvmmap(getpid(), &cnt);
287 if (freep) {
288 mmap_lock();
289 for (i = 0; i < cnt; i++) {
290 unsigned long startaddr, endaddr;
291
292 startaddr = freep[i].kve_start;
293 endaddr = freep[i].kve_end;
294 if (h2g_valid(startaddr)) {
295 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
296
297 if (h2g_valid(endaddr)) {
298 endaddr = h2g(endaddr);
299 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
300 } else {
301 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
302 endaddr = ~0ul;
303 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
304 #endif
305 }
306 }
307 }
308 free(freep);
309 mmap_unlock();
310 }
311 #else
312 FILE *f;
313
314 last_brk = (unsigned long)sbrk(0);
315
316 f = fopen("/compat/linux/proc/self/maps", "r");
317 if (f) {
318 mmap_lock();
319
320 do {
321 unsigned long startaddr, endaddr;
322 int n;
323
324 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
325
326 if (n == 2 && h2g_valid(startaddr)) {
327 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
328
329 if (h2g_valid(endaddr)) {
330 endaddr = h2g(endaddr);
331 } else {
332 endaddr = ~0ul;
333 }
334 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
335 }
336 } while (!feof(f));
337
338 fclose(f);
339 mmap_unlock();
340 }
341 #endif
342 }
343 #endif
344 }
345
346 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
347 {
348 PageDesc *pd;
349 void **lp;
350 int i;
351
352 #if defined(CONFIG_USER_ONLY)
353 /* We can't use g_malloc because it may recurse into a locked mutex. */
354 # define ALLOC(P, SIZE) \
355 do { \
356 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
357 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
358 } while (0)
359 #else
360 # define ALLOC(P, SIZE) \
361 do { P = g_malloc0(SIZE); } while (0)
362 #endif
363
364 /* Level 1. Always allocated. */
365 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
366
367 /* Level 2..N-1. */
368 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
369 void **p = *lp;
370
371 if (p == NULL) {
372 if (!alloc) {
373 return NULL;
374 }
375 ALLOC(p, sizeof(void *) * L2_SIZE);
376 *lp = p;
377 }
378
379 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
380 }
381
382 pd = *lp;
383 if (pd == NULL) {
384 if (!alloc) {
385 return NULL;
386 }
387 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
388 *lp = pd;
389 }
390
391 #undef ALLOC
392
393 return pd + (index & (L2_SIZE - 1));
394 }
395
396 static inline PageDesc *page_find(tb_page_addr_t index)
397 {
398 return page_find_alloc(index, 0);
399 }
400
401 #if !defined(CONFIG_USER_ONLY)
402 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
403 {
404 PhysPageEntry *lp, *p;
405 int i, j;
406
407 lp = &phys_map;
408
409 /* Level 1..N. */
410 for (i = P_L2_LEVELS - 1; i >= 0; i--) {
411 if (lp->u.node == NULL) {
412 if (!alloc) {
413 return NULL;
414 }
415 lp->u.node = p = g_malloc0(sizeof(PhysPageEntry) * L2_SIZE);
416 if (i == 0) {
417 int first_index = index & ~(L2_SIZE - 1);
418 for (j = 0; j < L2_SIZE; j++) {
419 p[j].u.leaf.phys_offset = io_mem_unassigned.ram_addr;
420 p[j].u.leaf.region_offset
421 = (first_index + j) << TARGET_PAGE_BITS;
422 }
423 }
424 }
425 lp = &lp->u.node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
426 }
427
428 return &lp->u.leaf;
429 }
430
431 static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
432 {
433 PhysPageDesc *p = phys_page_find_alloc(index, 0);
434
435 if (p) {
436 return *p;
437 } else {
438 return (PhysPageDesc) {
439 .phys_offset = io_mem_unassigned.ram_addr,
440 .region_offset = index << TARGET_PAGE_BITS,
441 };
442 }
443 }
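/* Typical caller pattern (illustrative; it mirrors breakpoint_invalidate
 * further down in this file):
 *   PhysPageDesc p = phys_page_find(addr >> TARGET_PAGE_BITS);
 *   ram_addr_t pd = p.phys_offset;
 * Unpopulated indexes come back as io_mem_unassigned rather than NULL, so
 * callers do not need a separate "not present" check. */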
444
445 static void tlb_protect_code(ram_addr_t ram_addr);
446 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
447 target_ulong vaddr);
448 #define mmap_lock() do { } while(0)
449 #define mmap_unlock() do { } while(0)
450 #endif
451
452 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
453
454 #if defined(CONFIG_USER_ONLY)
455 /* Currently it is not recommended to allocate big chunks of data in
456    user mode. This will change when a dedicated libc is used. */
457 #define USE_STATIC_CODE_GEN_BUFFER
458 #endif
459
460 #ifdef USE_STATIC_CODE_GEN_BUFFER
461 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
462 __attribute__((aligned (CODE_GEN_ALIGN)));
463 #endif
464
465 static void code_gen_alloc(unsigned long tb_size)
466 {
467 #ifdef USE_STATIC_CODE_GEN_BUFFER
468 code_gen_buffer = static_code_gen_buffer;
469 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
470 map_exec(code_gen_buffer, code_gen_buffer_size);
471 #else
472 code_gen_buffer_size = tb_size;
473 if (code_gen_buffer_size == 0) {
474 #if defined(CONFIG_USER_ONLY)
475 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
476 #else
477 /* XXX: needs adjustments */
478 code_gen_buffer_size = (unsigned long)(ram_size / 4);
479 #endif
480 }
481 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
482 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
483 /* The code gen buffer location may have constraints depending on
484 the host cpu and OS */
485 #if defined(__linux__)
486 {
487 int flags;
488 void *start = NULL;
489
490 flags = MAP_PRIVATE | MAP_ANONYMOUS;
491 #if defined(__x86_64__)
492 flags |= MAP_32BIT;
493 /* Cannot map more than that */
494 if (code_gen_buffer_size > (800 * 1024 * 1024))
495 code_gen_buffer_size = (800 * 1024 * 1024);
496 #elif defined(__sparc_v9__)
497 // Map the buffer below 2G, so we can use direct calls and branches
498 flags |= MAP_FIXED;
499 start = (void *) 0x60000000UL;
500 if (code_gen_buffer_size > (512 * 1024 * 1024))
501 code_gen_buffer_size = (512 * 1024 * 1024);
502 #elif defined(__arm__)
503 /* Keep the buffer no bigger than 16MB to branch between blocks */
504 if (code_gen_buffer_size > 16 * 1024 * 1024)
505 code_gen_buffer_size = 16 * 1024 * 1024;
506 #elif defined(__s390x__)
507 /* Map the buffer so that we can use direct calls and branches. */
508 /* We have a +- 4GB range on the branches; leave some slop. */
509 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
510 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
511 }
512 start = (void *)0x90000000UL;
513 #endif
514 code_gen_buffer = mmap(start, code_gen_buffer_size,
515 PROT_WRITE | PROT_READ | PROT_EXEC,
516 flags, -1, 0);
517 if (code_gen_buffer == MAP_FAILED) {
518 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
519 exit(1);
520 }
521 }
522 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
523 || defined(__DragonFly__) || defined(__OpenBSD__) \
524 || defined(__NetBSD__)
525 {
526 int flags;
527 void *addr = NULL;
528 flags = MAP_PRIVATE | MAP_ANONYMOUS;
529 #if defined(__x86_64__)
530 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
531 * 0x40000000 is free */
532 flags |= MAP_FIXED;
533 addr = (void *)0x40000000;
534 /* Cannot map more than that */
535 if (code_gen_buffer_size > (800 * 1024 * 1024))
536 code_gen_buffer_size = (800 * 1024 * 1024);
537 #elif defined(__sparc_v9__)
538 // Map the buffer below 2G, so we can use direct calls and branches
539 flags |= MAP_FIXED;
540 addr = (void *) 0x60000000UL;
541 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
542 code_gen_buffer_size = (512 * 1024 * 1024);
543 }
544 #endif
545 code_gen_buffer = mmap(addr, code_gen_buffer_size,
546 PROT_WRITE | PROT_READ | PROT_EXEC,
547 flags, -1, 0);
548 if (code_gen_buffer == MAP_FAILED) {
549 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
550 exit(1);
551 }
552 }
553 #else
554 code_gen_buffer = g_malloc(code_gen_buffer_size);
555 map_exec(code_gen_buffer, code_gen_buffer_size);
556 #endif
557 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
558 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
559 code_gen_buffer_max_size = code_gen_buffer_size -
560 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
561 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
562 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
563 }
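/* Rough sizing sketch (illustrative; CODE_GEN_AVG_BLOCK_SIZE, TCG_MAX_OP_SIZE
 * and OPC_BUF_SIZE are defined elsewhere): with the 32 MB default buffer and
 * an assumed average block size of 128 bytes, code_gen_max_blocks comes out to
 * about 256K TranslationBlock slots, while code_gen_buffer_max_size keeps one
 * maximum-size translation worth of slack so an in-progress block cannot
 * overrun the end of the buffer. */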
564
565 /* Must be called before using the QEMU cpus. 'tb_size' is the size
566 (in bytes) allocated to the translation buffer. Zero means default
567 size. */
568 void tcg_exec_init(unsigned long tb_size)
569 {
570 cpu_gen_init();
571 code_gen_alloc(tb_size);
572 code_gen_ptr = code_gen_buffer;
573 page_init();
574 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
575 /* There's no guest base to take into account, so go ahead and
576 initialize the prologue now. */
577 tcg_prologue_init(&tcg_ctx);
578 #endif
579 }
580
581 bool tcg_enabled(void)
582 {
583 return code_gen_buffer != NULL;
584 }
585
586 void cpu_exec_init_all(void)
587 {
588 #if !defined(CONFIG_USER_ONLY)
589 memory_map_init();
590 io_mem_init();
591 #endif
592 }
593
594 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
595
596 static int cpu_common_post_load(void *opaque, int version_id)
597 {
598 CPUState *env = opaque;
599
600 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
601 version_id is increased. */
602 env->interrupt_request &= ~0x01;
603 tlb_flush(env, 1);
604
605 return 0;
606 }
607
608 static const VMStateDescription vmstate_cpu_common = {
609 .name = "cpu_common",
610 .version_id = 1,
611 .minimum_version_id = 1,
612 .minimum_version_id_old = 1,
613 .post_load = cpu_common_post_load,
614 .fields = (VMStateField []) {
615 VMSTATE_UINT32(halted, CPUState),
616 VMSTATE_UINT32(interrupt_request, CPUState),
617 VMSTATE_END_OF_LIST()
618 }
619 };
620 #endif
621
622 CPUState *qemu_get_cpu(int cpu)
623 {
624 CPUState *env = first_cpu;
625
626 while (env) {
627 if (env->cpu_index == cpu)
628 break;
629 env = env->next_cpu;
630 }
631
632 return env;
633 }
634
635 void cpu_exec_init(CPUState *env)
636 {
637 CPUState **penv;
638 int cpu_index;
639
640 #if defined(CONFIG_USER_ONLY)
641 cpu_list_lock();
642 #endif
643 env->next_cpu = NULL;
644 penv = &first_cpu;
645 cpu_index = 0;
646 while (*penv != NULL) {
647 penv = &(*penv)->next_cpu;
648 cpu_index++;
649 }
650 env->cpu_index = cpu_index;
651 env->numa_node = 0;
652 QTAILQ_INIT(&env->breakpoints);
653 QTAILQ_INIT(&env->watchpoints);
654 #ifndef CONFIG_USER_ONLY
655 env->thread_id = qemu_get_thread_id();
656 #endif
657 *penv = env;
658 #if defined(CONFIG_USER_ONLY)
659 cpu_list_unlock();
660 #endif
661 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
662 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
663 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
664 cpu_save, cpu_load, env);
665 #endif
666 }
667
668 /* Allocate a new translation block. Flush the translation buffer if
669 too many translation blocks or too much generated code. */
670 static TranslationBlock *tb_alloc(target_ulong pc)
671 {
672 TranslationBlock *tb;
673
674 if (nb_tbs >= code_gen_max_blocks ||
675 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
676 return NULL;
677 tb = &tbs[nb_tbs++];
678 tb->pc = pc;
679 tb->cflags = 0;
680 return tb;
681 }
682
683 void tb_free(TranslationBlock *tb)
684 {
685     /* In practice this is mostly used for single-use temporary TBs.
686 Ignore the hard cases and just back up if this TB happens to
687 be the last one generated. */
688 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
689 code_gen_ptr = tb->tc_ptr;
690 nb_tbs--;
691 }
692 }
693
694 static inline void invalidate_page_bitmap(PageDesc *p)
695 {
696 if (p->code_bitmap) {
697 g_free(p->code_bitmap);
698 p->code_bitmap = NULL;
699 }
700 p->code_write_count = 0;
701 }
702
703 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
704
705 static void page_flush_tb_1 (int level, void **lp)
706 {
707 int i;
708
709 if (*lp == NULL) {
710 return;
711 }
712 if (level == 0) {
713 PageDesc *pd = *lp;
714 for (i = 0; i < L2_SIZE; ++i) {
715 pd[i].first_tb = NULL;
716 invalidate_page_bitmap(pd + i);
717 }
718 } else {
719 void **pp = *lp;
720 for (i = 0; i < L2_SIZE; ++i) {
721 page_flush_tb_1 (level - 1, pp + i);
722 }
723 }
724 }
725
726 static void page_flush_tb(void)
727 {
728 int i;
729 for (i = 0; i < V_L1_SIZE; i++) {
730 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
731 }
732 }
733
734 /* flush all the translation blocks */
735 /* XXX: tb_flush is currently not thread safe */
736 void tb_flush(CPUState *env1)
737 {
738 CPUState *env;
739 #if defined(DEBUG_FLUSH)
740 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
741 (unsigned long)(code_gen_ptr - code_gen_buffer),
742 nb_tbs, nb_tbs > 0 ?
743 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
744 #endif
745 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
746 cpu_abort(env1, "Internal error: code buffer overflow\n");
747
748 nb_tbs = 0;
749
750 for(env = first_cpu; env != NULL; env = env->next_cpu) {
751 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
752 }
753
754 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
755 page_flush_tb();
756
757 code_gen_ptr = code_gen_buffer;
758 /* XXX: flush processor icache at this point if cache flush is
759 expensive */
760 tb_flush_count++;
761 }
762
763 #ifdef DEBUG_TB_CHECK
764
765 static void tb_invalidate_check(target_ulong address)
766 {
767 TranslationBlock *tb;
768 int i;
769 address &= TARGET_PAGE_MASK;
770 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
771 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
772 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
773 address >= tb->pc + tb->size)) {
774 printf("ERROR invalidate: address=" TARGET_FMT_lx
775 " PC=%08lx size=%04x\n",
776 address, (long)tb->pc, tb->size);
777 }
778 }
779 }
780 }
781
782 /* verify that all the pages have correct rights for code */
783 static void tb_page_check(void)
784 {
785 TranslationBlock *tb;
786 int i, flags1, flags2;
787
788 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
789 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
790 flags1 = page_get_flags(tb->pc);
791 flags2 = page_get_flags(tb->pc + tb->size - 1);
792 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
793 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
794 (long)tb->pc, tb->size, flags1, flags2);
795 }
796 }
797 }
798 }
799
800 #endif
801
802 /* invalidate one TB */
803 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
804 int next_offset)
805 {
806 TranslationBlock *tb1;
807 for(;;) {
808 tb1 = *ptb;
809 if (tb1 == tb) {
810 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
811 break;
812 }
813 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
814 }
815 }
816
817 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
818 {
819 TranslationBlock *tb1;
820 unsigned int n1;
821
822 for(;;) {
823 tb1 = *ptb;
824 n1 = (long)tb1 & 3;
825 tb1 = (TranslationBlock *)((long)tb1 & ~3);
826 if (tb1 == tb) {
827 *ptb = tb1->page_next[n1];
828 break;
829 }
830 ptb = &tb1->page_next[n1];
831 }
832 }
833
834 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
835 {
836 TranslationBlock *tb1, **ptb;
837 unsigned int n1;
838
839 ptb = &tb->jmp_next[n];
840 tb1 = *ptb;
841 if (tb1) {
842 /* find tb(n) in circular list */
843 for(;;) {
844 tb1 = *ptb;
845 n1 = (long)tb1 & 3;
846 tb1 = (TranslationBlock *)((long)tb1 & ~3);
847 if (n1 == n && tb1 == tb)
848 break;
849 if (n1 == 2) {
850 ptb = &tb1->jmp_first;
851 } else {
852 ptb = &tb1->jmp_next[n1];
853 }
854 }
855         /* now we can remove tb(n) from the list */
856 *ptb = tb->jmp_next[n];
857
858 tb->jmp_next[n] = NULL;
859 }
860 }
861
862 /* reset the jump entry 'n' of a TB so that it is not chained to
863 another TB */
864 static inline void tb_reset_jump(TranslationBlock *tb, int n)
865 {
866 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
867 }
868
869 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
870 {
871 CPUState *env;
872 PageDesc *p;
873 unsigned int h, n1;
874 tb_page_addr_t phys_pc;
875 TranslationBlock *tb1, *tb2;
876
877 /* remove the TB from the hash list */
878 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
879 h = tb_phys_hash_func(phys_pc);
880 tb_remove(&tb_phys_hash[h], tb,
881 offsetof(TranslationBlock, phys_hash_next));
882
883 /* remove the TB from the page list */
884 if (tb->page_addr[0] != page_addr) {
885 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
886 tb_page_remove(&p->first_tb, tb);
887 invalidate_page_bitmap(p);
888 }
889 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
890 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
891 tb_page_remove(&p->first_tb, tb);
892 invalidate_page_bitmap(p);
893 }
894
895 tb_invalidated_flag = 1;
896
897     /* remove the TB from each CPU's tb_jmp_cache */
898 h = tb_jmp_cache_hash_func(tb->pc);
899 for(env = first_cpu; env != NULL; env = env->next_cpu) {
900 if (env->tb_jmp_cache[h] == tb)
901 env->tb_jmp_cache[h] = NULL;
902 }
903
904     /* remove this TB from the two jump lists */
905 tb_jmp_remove(tb, 0);
906 tb_jmp_remove(tb, 1);
907
908 /* suppress any remaining jumps to this TB */
909 tb1 = tb->jmp_first;
910 for(;;) {
911 n1 = (long)tb1 & 3;
912 if (n1 == 2)
913 break;
914 tb1 = (TranslationBlock *)((long)tb1 & ~3);
915 tb2 = tb1->jmp_next[n1];
916 tb_reset_jump(tb1, n1);
917 tb1->jmp_next[n1] = NULL;
918 tb1 = tb2;
919 }
920 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
921
922 tb_phys_invalidate_count++;
923 }
924
925 static inline void set_bits(uint8_t *tab, int start, int len)
926 {
927 int end, mask, end1;
928
929 end = start + len;
930 tab += start >> 3;
931 mask = 0xff << (start & 7);
932 if ((start & ~7) == (end & ~7)) {
933 if (start < end) {
934 mask &= ~(0xff << (end & 7));
935 *tab |= mask;
936 }
937 } else {
938 *tab++ |= mask;
939 start = (start + 8) & ~7;
940 end1 = end & ~7;
941 while (start < end1) {
942 *tab++ = 0xff;
943 start += 8;
944 }
945 if (start < end) {
946 mask = ~(0xff << (end & 7));
947 *tab |= mask;
948 }
949 }
950 }
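/* Worked example (illustrative): set_bits(tab, 3, 10) marks bits 3..12.
 * start=3 and end=13 span two bytes, so the else branch runs:
 * tab[0] |= 0xff << 3 == 0xf8 (bits 3..7), then the second byte gets
 * ~(0xff << (13 & 7)) == 0x1f (bits 8..12). */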
951
952 static void build_page_bitmap(PageDesc *p)
953 {
954 int n, tb_start, tb_end;
955 TranslationBlock *tb;
956
957 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
958
959 tb = p->first_tb;
960 while (tb != NULL) {
961 n = (long)tb & 3;
962 tb = (TranslationBlock *)((long)tb & ~3);
963 /* NOTE: this is subtle as a TB may span two physical pages */
964 if (n == 0) {
965 /* NOTE: tb_end may be after the end of the page, but
966 it is not a problem */
967 tb_start = tb->pc & ~TARGET_PAGE_MASK;
968 tb_end = tb_start + tb->size;
969 if (tb_end > TARGET_PAGE_SIZE)
970 tb_end = TARGET_PAGE_SIZE;
971 } else {
972 tb_start = 0;
973 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
974 }
975 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
976 tb = tb->page_next[n];
977 }
978 }
979
980 TranslationBlock *tb_gen_code(CPUState *env,
981 target_ulong pc, target_ulong cs_base,
982 int flags, int cflags)
983 {
984 TranslationBlock *tb;
985 uint8_t *tc_ptr;
986 tb_page_addr_t phys_pc, phys_page2;
987 target_ulong virt_page2;
988 int code_gen_size;
989
990 phys_pc = get_page_addr_code(env, pc);
991 tb = tb_alloc(pc);
992 if (!tb) {
993 /* flush must be done */
994 tb_flush(env);
995 /* cannot fail at this point */
996 tb = tb_alloc(pc);
997 /* Don't forget to invalidate previous TB info. */
998 tb_invalidated_flag = 1;
999 }
1000 tc_ptr = code_gen_ptr;
1001 tb->tc_ptr = tc_ptr;
1002 tb->cs_base = cs_base;
1003 tb->flags = flags;
1004 tb->cflags = cflags;
1005 cpu_gen_code(env, tb, &code_gen_size);
1006 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1007
1008 /* check next page if needed */
1009 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1010 phys_page2 = -1;
1011 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1012 phys_page2 = get_page_addr_code(env, virt_page2);
1013 }
1014 tb_link_page(tb, phys_pc, phys_page2);
1015 return tb;
1016 }
1017
1018 /* invalidate all TBs which intersect with the target physical page
1019    in the range [start, end). NOTE: start and end must refer to
1020 the same physical page. 'is_cpu_write_access' should be true if called
1021 from a real cpu write access: the virtual CPU will exit the current
1022 TB if code is modified inside this TB. */
1023 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1024 int is_cpu_write_access)
1025 {
1026 TranslationBlock *tb, *tb_next, *saved_tb;
1027 CPUState *env = cpu_single_env;
1028 tb_page_addr_t tb_start, tb_end;
1029 PageDesc *p;
1030 int n;
1031 #ifdef TARGET_HAS_PRECISE_SMC
1032 int current_tb_not_found = is_cpu_write_access;
1033 TranslationBlock *current_tb = NULL;
1034 int current_tb_modified = 0;
1035 target_ulong current_pc = 0;
1036 target_ulong current_cs_base = 0;
1037 int current_flags = 0;
1038 #endif /* TARGET_HAS_PRECISE_SMC */
1039
1040 p = page_find(start >> TARGET_PAGE_BITS);
1041 if (!p)
1042 return;
1043 if (!p->code_bitmap &&
1044 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1045 is_cpu_write_access) {
1046 /* build code bitmap */
1047 build_page_bitmap(p);
1048 }
1049
1050     /* we remove all the TBs in the range [start, end) */
1051 /* XXX: see if in some cases it could be faster to invalidate all the code */
1052 tb = p->first_tb;
1053 while (tb != NULL) {
1054 n = (long)tb & 3;
1055 tb = (TranslationBlock *)((long)tb & ~3);
1056 tb_next = tb->page_next[n];
1057 /* NOTE: this is subtle as a TB may span two physical pages */
1058 if (n == 0) {
1059 /* NOTE: tb_end may be after the end of the page, but
1060 it is not a problem */
1061 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1062 tb_end = tb_start + tb->size;
1063 } else {
1064 tb_start = tb->page_addr[1];
1065 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1066 }
1067 if (!(tb_end <= start || tb_start >= end)) {
1068 #ifdef TARGET_HAS_PRECISE_SMC
1069 if (current_tb_not_found) {
1070 current_tb_not_found = 0;
1071 current_tb = NULL;
1072 if (env->mem_io_pc) {
1073 /* now we have a real cpu fault */
1074 current_tb = tb_find_pc(env->mem_io_pc);
1075 }
1076 }
1077 if (current_tb == tb &&
1078 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1079 /* If we are modifying the current TB, we must stop
1080 its execution. We could be more precise by checking
1081 that the modification is after the current PC, but it
1082 would require a specialized function to partially
1083 restore the CPU state */
1084
1085 current_tb_modified = 1;
1086 cpu_restore_state(current_tb, env, env->mem_io_pc);
1087 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1088 &current_flags);
1089 }
1090 #endif /* TARGET_HAS_PRECISE_SMC */
1091 /* we need to do that to handle the case where a signal
1092 occurs while doing tb_phys_invalidate() */
1093 saved_tb = NULL;
1094 if (env) {
1095 saved_tb = env->current_tb;
1096 env->current_tb = NULL;
1097 }
1098 tb_phys_invalidate(tb, -1);
1099 if (env) {
1100 env->current_tb = saved_tb;
1101 if (env->interrupt_request && env->current_tb)
1102 cpu_interrupt(env, env->interrupt_request);
1103 }
1104 }
1105 tb = tb_next;
1106 }
1107 #if !defined(CONFIG_USER_ONLY)
1108     /* if no code remains, no need to continue using slow writes */
1109 if (!p->first_tb) {
1110 invalidate_page_bitmap(p);
1111 if (is_cpu_write_access) {
1112 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1113 }
1114 }
1115 #endif
1116 #ifdef TARGET_HAS_PRECISE_SMC
1117 if (current_tb_modified) {
1118 /* we generate a block containing just the instruction
1119 modifying the memory. It will ensure that it cannot modify
1120 itself */
1121 env->current_tb = NULL;
1122 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1123 cpu_resume_from_signal(env, NULL);
1124 }
1125 #endif
1126 }
1127
1128 /* len must be <= 8 and start must be a multiple of len */
1129 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1130 {
1131 PageDesc *p;
1132 int offset, b;
1133 #if 0
1134 if (1) {
1135 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1136 cpu_single_env->mem_io_vaddr, len,
1137 cpu_single_env->eip,
1138 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1139 }
1140 #endif
1141 p = page_find(start >> TARGET_PAGE_BITS);
1142 if (!p)
1143 return;
1144 if (p->code_bitmap) {
1145 offset = start & ~TARGET_PAGE_MASK;
1146 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1147 if (b & ((1 << len) - 1))
1148 goto do_invalidate;
1149 } else {
1150 do_invalidate:
1151 tb_invalidate_phys_page_range(start, start + len, 1);
1152 }
1153 }
1154
1155 #if !defined(CONFIG_SOFTMMU)
1156 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1157 unsigned long pc, void *puc)
1158 {
1159 TranslationBlock *tb;
1160 PageDesc *p;
1161 int n;
1162 #ifdef TARGET_HAS_PRECISE_SMC
1163 TranslationBlock *current_tb = NULL;
1164 CPUState *env = cpu_single_env;
1165 int current_tb_modified = 0;
1166 target_ulong current_pc = 0;
1167 target_ulong current_cs_base = 0;
1168 int current_flags = 0;
1169 #endif
1170
1171 addr &= TARGET_PAGE_MASK;
1172 p = page_find(addr >> TARGET_PAGE_BITS);
1173 if (!p)
1174 return;
1175 tb = p->first_tb;
1176 #ifdef TARGET_HAS_PRECISE_SMC
1177 if (tb && pc != 0) {
1178 current_tb = tb_find_pc(pc);
1179 }
1180 #endif
1181 while (tb != NULL) {
1182 n = (long)tb & 3;
1183 tb = (TranslationBlock *)((long)tb & ~3);
1184 #ifdef TARGET_HAS_PRECISE_SMC
1185 if (current_tb == tb &&
1186 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1187 /* If we are modifying the current TB, we must stop
1188 its execution. We could be more precise by checking
1189 that the modification is after the current PC, but it
1190 would require a specialized function to partially
1191 restore the CPU state */
1192
1193 current_tb_modified = 1;
1194 cpu_restore_state(current_tb, env, pc);
1195 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1196 &current_flags);
1197 }
1198 #endif /* TARGET_HAS_PRECISE_SMC */
1199 tb_phys_invalidate(tb, addr);
1200 tb = tb->page_next[n];
1201 }
1202 p->first_tb = NULL;
1203 #ifdef TARGET_HAS_PRECISE_SMC
1204 if (current_tb_modified) {
1205 /* we generate a block containing just the instruction
1206 modifying the memory. It will ensure that it cannot modify
1207 itself */
1208 env->current_tb = NULL;
1209 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1210 cpu_resume_from_signal(env, puc);
1211 }
1212 #endif
1213 }
1214 #endif
1215
1216 /* add the tb in the target page and protect it if necessary */
1217 static inline void tb_alloc_page(TranslationBlock *tb,
1218 unsigned int n, tb_page_addr_t page_addr)
1219 {
1220 PageDesc *p;
1221 #ifndef CONFIG_USER_ONLY
1222 bool page_already_protected;
1223 #endif
1224
1225 tb->page_addr[n] = page_addr;
1226 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1227 tb->page_next[n] = p->first_tb;
1228 #ifndef CONFIG_USER_ONLY
1229 page_already_protected = p->first_tb != NULL;
1230 #endif
1231 p->first_tb = (TranslationBlock *)((long)tb | n);
1232 invalidate_page_bitmap(p);
1233
1234 #if defined(TARGET_HAS_SMC) || 1
1235
1236 #if defined(CONFIG_USER_ONLY)
1237 if (p->flags & PAGE_WRITE) {
1238 target_ulong addr;
1239 PageDesc *p2;
1240 int prot;
1241
1242         /* force the host page to be non-writable (writes will take a
1243 page fault + mprotect overhead) */
1244 page_addr &= qemu_host_page_mask;
1245 prot = 0;
1246 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1247 addr += TARGET_PAGE_SIZE) {
1248
1249 p2 = page_find (addr >> TARGET_PAGE_BITS);
1250 if (!p2)
1251 continue;
1252 prot |= p2->flags;
1253 p2->flags &= ~PAGE_WRITE;
1254 }
1255 mprotect(g2h(page_addr), qemu_host_page_size,
1256 (prot & PAGE_BITS) & ~PAGE_WRITE);
1257 #ifdef DEBUG_TB_INVALIDATE
1258 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1259 page_addr);
1260 #endif
1261 }
1262 #else
1263 /* if some code is already present, then the pages are already
1264 protected. So we handle the case where only the first TB is
1265 allocated in a physical page */
1266 if (!page_already_protected) {
1267 tlb_protect_code(page_addr);
1268 }
1269 #endif
1270
1271 #endif /* TARGET_HAS_SMC */
1272 }
1273
1274 /* add a new TB and link it to the physical page tables. phys_page2 is
1275 (-1) to indicate that only one page contains the TB. */
1276 void tb_link_page(TranslationBlock *tb,
1277 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1278 {
1279 unsigned int h;
1280 TranslationBlock **ptb;
1281
1282 /* Grab the mmap lock to stop another thread invalidating this TB
1283 before we are done. */
1284 mmap_lock();
1285 /* add in the physical hash table */
1286 h = tb_phys_hash_func(phys_pc);
1287 ptb = &tb_phys_hash[h];
1288 tb->phys_hash_next = *ptb;
1289 *ptb = tb;
1290
1291 /* add in the page list */
1292 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1293 if (phys_page2 != -1)
1294 tb_alloc_page(tb, 1, phys_page2);
1295 else
1296 tb->page_addr[1] = -1;
1297
1298 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1299 tb->jmp_next[0] = NULL;
1300 tb->jmp_next[1] = NULL;
1301
1302 /* init original jump addresses */
1303 if (tb->tb_next_offset[0] != 0xffff)
1304 tb_reset_jump(tb, 0);
1305 if (tb->tb_next_offset[1] != 0xffff)
1306 tb_reset_jump(tb, 1);
1307
1308 #ifdef DEBUG_TB_CHECK
1309 tb_page_check();
1310 #endif
1311 mmap_unlock();
1312 }
1313
1314 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1315 tb[1].tc_ptr. Return NULL if not found */
1316 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1317 {
1318 int m_min, m_max, m;
1319 unsigned long v;
1320 TranslationBlock *tb;
1321
1322 if (nb_tbs <= 0)
1323 return NULL;
1324 if (tc_ptr < (unsigned long)code_gen_buffer ||
1325 tc_ptr >= (unsigned long)code_gen_ptr)
1326 return NULL;
1327 /* binary search (cf Knuth) */
1328 m_min = 0;
1329 m_max = nb_tbs - 1;
1330 while (m_min <= m_max) {
1331 m = (m_min + m_max) >> 1;
1332 tb = &tbs[m];
1333 v = (unsigned long)tb->tc_ptr;
1334 if (v == tc_ptr)
1335 return tb;
1336 else if (tc_ptr < v) {
1337 m_max = m - 1;
1338 } else {
1339 m_min = m + 1;
1340 }
1341 }
1342 return &tbs[m_max];
1343 }
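/* Note: tbs[] is ordered by tc_ptr because blocks are carved sequentially
 * from code_gen_buffer, so when the search loop falls through, m_max indexes
 * the last TB whose tc_ptr is <= tc_ptr, i.e. the block containing the
 * host PC. */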
1344
1345 static void tb_reset_jump_recursive(TranslationBlock *tb);
1346
1347 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1348 {
1349 TranslationBlock *tb1, *tb_next, **ptb;
1350 unsigned int n1;
1351
1352 tb1 = tb->jmp_next[n];
1353 if (tb1 != NULL) {
1354 /* find head of list */
1355 for(;;) {
1356 n1 = (long)tb1 & 3;
1357 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1358 if (n1 == 2)
1359 break;
1360 tb1 = tb1->jmp_next[n1];
1361 }
1362         /* we are now sure that tb jumps to tb1 */
1363 tb_next = tb1;
1364
1365 /* remove tb from the jmp_first list */
1366 ptb = &tb_next->jmp_first;
1367 for(;;) {
1368 tb1 = *ptb;
1369 n1 = (long)tb1 & 3;
1370 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1371 if (n1 == n && tb1 == tb)
1372 break;
1373 ptb = &tb1->jmp_next[n1];
1374 }
1375 *ptb = tb->jmp_next[n];
1376 tb->jmp_next[n] = NULL;
1377
1378 /* suppress the jump to next tb in generated code */
1379 tb_reset_jump(tb, n);
1380
1381 /* suppress jumps in the tb on which we could have jumped */
1382 tb_reset_jump_recursive(tb_next);
1383 }
1384 }
1385
1386 static void tb_reset_jump_recursive(TranslationBlock *tb)
1387 {
1388 tb_reset_jump_recursive2(tb, 0);
1389 tb_reset_jump_recursive2(tb, 1);
1390 }
1391
1392 #if defined(TARGET_HAS_ICE)
1393 #if defined(CONFIG_USER_ONLY)
1394 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1395 {
1396 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1397 }
1398 #else
1399 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1400 {
1401 target_phys_addr_t addr;
1402 target_ulong pd;
1403 ram_addr_t ram_addr;
1404 PhysPageDesc p;
1405
1406 addr = cpu_get_phys_page_debug(env, pc);
1407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1408 pd = p.phys_offset;
1409 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1410 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1411 }
1412 #endif
1413 #endif /* TARGET_HAS_ICE */
1414
1415 #if defined(CONFIG_USER_ONLY)
1416 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1417
1418 {
1419 }
1420
1421 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422 int flags, CPUWatchpoint **watchpoint)
1423 {
1424 return -ENOSYS;
1425 }
1426 #else
1427 /* Add a watchpoint. */
1428 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1429 int flags, CPUWatchpoint **watchpoint)
1430 {
1431 target_ulong len_mask = ~(len - 1);
1432 CPUWatchpoint *wp;
1433
1434 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1435 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1436 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1437 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1438 return -EINVAL;
1439 }
1440 wp = g_malloc(sizeof(*wp));
1441
1442 wp->vaddr = addr;
1443 wp->len_mask = len_mask;
1444 wp->flags = flags;
1445
1446 /* keep all GDB-injected watchpoints in front */
1447 if (flags & BP_GDB)
1448 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1449 else
1450 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1451
1452 tlb_flush_page(env, addr);
1453
1454 if (watchpoint)
1455 *watchpoint = wp;
1456 return 0;
1457 }
1458
1459 /* Remove a specific watchpoint. */
1460 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1461 int flags)
1462 {
1463 target_ulong len_mask = ~(len - 1);
1464 CPUWatchpoint *wp;
1465
1466 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1467 if (addr == wp->vaddr && len_mask == wp->len_mask
1468 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1469 cpu_watchpoint_remove_by_ref(env, wp);
1470 return 0;
1471 }
1472 }
1473 return -ENOENT;
1474 }
1475
1476 /* Remove a specific watchpoint by reference. */
1477 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1478 {
1479 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1480
1481 tlb_flush_page(env, watchpoint->vaddr);
1482
1483 g_free(watchpoint);
1484 }
1485
1486 /* Remove all matching watchpoints. */
1487 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1488 {
1489 CPUWatchpoint *wp, *next;
1490
1491 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1492 if (wp->flags & mask)
1493 cpu_watchpoint_remove_by_ref(env, wp);
1494 }
1495 }
1496 #endif
1497
1498 /* Add a breakpoint. */
1499 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1500 CPUBreakpoint **breakpoint)
1501 {
1502 #if defined(TARGET_HAS_ICE)
1503 CPUBreakpoint *bp;
1504
1505 bp = g_malloc(sizeof(*bp));
1506
1507 bp->pc = pc;
1508 bp->flags = flags;
1509
1510 /* keep all GDB-injected breakpoints in front */
1511 if (flags & BP_GDB)
1512 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1513 else
1514 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1515
1516 breakpoint_invalidate(env, pc);
1517
1518 if (breakpoint)
1519 *breakpoint = bp;
1520 return 0;
1521 #else
1522 return -ENOSYS;
1523 #endif
1524 }
1525
1526 /* Remove a specific breakpoint. */
1527 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1528 {
1529 #if defined(TARGET_HAS_ICE)
1530 CPUBreakpoint *bp;
1531
1532 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1533 if (bp->pc == pc && bp->flags == flags) {
1534 cpu_breakpoint_remove_by_ref(env, bp);
1535 return 0;
1536 }
1537 }
1538 return -ENOENT;
1539 #else
1540 return -ENOSYS;
1541 #endif
1542 }
1543
1544 /* Remove a specific breakpoint by reference. */
1545 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1546 {
1547 #if defined(TARGET_HAS_ICE)
1548 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1549
1550 breakpoint_invalidate(env, breakpoint->pc);
1551
1552 g_free(breakpoint);
1553 #endif
1554 }
1555
1556 /* Remove all matching breakpoints. */
1557 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1558 {
1559 #if defined(TARGET_HAS_ICE)
1560 CPUBreakpoint *bp, *next;
1561
1562 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1563 if (bp->flags & mask)
1564 cpu_breakpoint_remove_by_ref(env, bp);
1565 }
1566 #endif
1567 }
1568
1569 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1570 CPU loop after each instruction */
1571 void cpu_single_step(CPUState *env, int enabled)
1572 {
1573 #if defined(TARGET_HAS_ICE)
1574 if (env->singlestep_enabled != enabled) {
1575 env->singlestep_enabled = enabled;
1576 if (kvm_enabled())
1577 kvm_update_guest_debug(env, 0);
1578 else {
1579 /* must flush all the translated code to avoid inconsistencies */
1580 /* XXX: only flush what is necessary */
1581 tb_flush(env);
1582 }
1583 }
1584 #endif
1585 }
1586
1587 /* enable or disable low-level logging */
1588 void cpu_set_log(int log_flags)
1589 {
1590 loglevel = log_flags;
1591 if (loglevel && !logfile) {
1592 logfile = fopen(logfilename, log_append ? "a" : "w");
1593 if (!logfile) {
1594 perror(logfilename);
1595 _exit(1);
1596 }
1597 #if !defined(CONFIG_SOFTMMU)
1598     /* must avoid glibc's mmap() usage by setting a buffer "by hand" */
1599 {
1600 static char logfile_buf[4096];
1601 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1602 }
1603 #elif defined(_WIN32)
1604 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1605 setvbuf(logfile, NULL, _IONBF, 0);
1606 #else
1607 setvbuf(logfile, NULL, _IOLBF, 0);
1608 #endif
1609 log_append = 1;
1610 }
1611 if (!loglevel && logfile) {
1612 fclose(logfile);
1613 logfile = NULL;
1614 }
1615 }
1616
1617 void cpu_set_log_filename(const char *filename)
1618 {
1619 logfilename = strdup(filename);
1620 if (logfile) {
1621 fclose(logfile);
1622 logfile = NULL;
1623 }
1624 cpu_set_log(loglevel);
1625 }
1626
1627 static void cpu_unlink_tb(CPUState *env)
1628 {
1629 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1630 problem and hope the cpu will stop of its own accord. For userspace
1631    emulation this usually isn't as bad as it sounds, since signals are
1632    mostly used to interrupt blocking syscalls. */
1633 TranslationBlock *tb;
1634 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1635
1636 spin_lock(&interrupt_lock);
1637 tb = env->current_tb;
1638 /* if the cpu is currently executing code, we must unlink it and
1639        all the potentially executing TBs */
1640 if (tb) {
1641 env->current_tb = NULL;
1642 tb_reset_jump_recursive(tb);
1643 }
1644 spin_unlock(&interrupt_lock);
1645 }
1646
1647 #ifndef CONFIG_USER_ONLY
1648 /* mask must never be zero, except for A20 change call */
1649 static void tcg_handle_interrupt(CPUState *env, int mask)
1650 {
1651 int old_mask;
1652
1653 old_mask = env->interrupt_request;
1654 env->interrupt_request |= mask;
1655
1656 /*
1657 * If called from iothread context, wake the target cpu in
1658      * case it's halted.
1659 */
1660 if (!qemu_cpu_is_self(env)) {
1661 qemu_cpu_kick(env);
1662 return;
1663 }
1664
1665 if (use_icount) {
1666 env->icount_decr.u16.high = 0xffff;
1667 if (!can_do_io(env)
1668 && (mask & ~old_mask) != 0) {
1669 cpu_abort(env, "Raised interrupt while not in I/O function");
1670 }
1671 } else {
1672 cpu_unlink_tb(env);
1673 }
1674 }
1675
1676 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1677
1678 #else /* CONFIG_USER_ONLY */
1679
1680 void cpu_interrupt(CPUState *env, int mask)
1681 {
1682 env->interrupt_request |= mask;
1683 cpu_unlink_tb(env);
1684 }
1685 #endif /* CONFIG_USER_ONLY */
1686
1687 void cpu_reset_interrupt(CPUState *env, int mask)
1688 {
1689 env->interrupt_request &= ~mask;
1690 }
1691
1692 void cpu_exit(CPUState *env)
1693 {
1694 env->exit_request = 1;
1695 cpu_unlink_tb(env);
1696 }
1697
1698 const CPULogItem cpu_log_items[] = {
1699 { CPU_LOG_TB_OUT_ASM, "out_asm",
1700 "show generated host assembly code for each compiled TB" },
1701 { CPU_LOG_TB_IN_ASM, "in_asm",
1702 "show target assembly code for each compiled TB" },
1703 { CPU_LOG_TB_OP, "op",
1704 "show micro ops for each compiled TB" },
1705 { CPU_LOG_TB_OP_OPT, "op_opt",
1706 "show micro ops "
1707 #ifdef TARGET_I386
1708 "before eflags optimization and "
1709 #endif
1710 "after liveness analysis" },
1711 { CPU_LOG_INT, "int",
1712 "show interrupts/exceptions in short format" },
1713 { CPU_LOG_EXEC, "exec",
1714 "show trace before each executed TB (lots of logs)" },
1715 { CPU_LOG_TB_CPU, "cpu",
1716 "show CPU state before block translation" },
1717 #ifdef TARGET_I386
1718 { CPU_LOG_PCALL, "pcall",
1719 "show protected mode far calls/returns/exceptions" },
1720 { CPU_LOG_RESET, "cpu_reset",
1721 "show CPU state before CPU resets" },
1722 #endif
1723 #ifdef DEBUG_IOPORT
1724 { CPU_LOG_IOPORT, "ioport",
1725 "show all i/o ports accesses" },
1726 #endif
1727 { 0, NULL, NULL },
1728 };
1729
1730 static int cmp1(const char *s1, int n, const char *s2)
1731 {
1732 if (strlen(s2) != n)
1733 return 0;
1734 return memcmp(s1, s2, n) == 0;
1735 }
1736
1737 /* takes a comma-separated list of log masks. Returns 0 on error. */
1738 int cpu_str_to_log_mask(const char *str)
1739 {
1740 const CPULogItem *item;
1741 int mask;
1742 const char *p, *p1;
1743
1744 p = str;
1745 mask = 0;
1746 for(;;) {
1747 p1 = strchr(p, ',');
1748 if (!p1)
1749 p1 = p + strlen(p);
1750 if(cmp1(p,p1-p,"all")) {
1751 for(item = cpu_log_items; item->mask != 0; item++) {
1752 mask |= item->mask;
1753 }
1754 } else {
1755 for(item = cpu_log_items; item->mask != 0; item++) {
1756 if (cmp1(p, p1 - p, item->name))
1757 goto found;
1758 }
1759 return 0;
1760 }
1761 found:
1762 mask |= item->mask;
1763 if (*p1 != ',')
1764 break;
1765 p = p1 + 1;
1766 }
1767 return mask;
1768 }
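/* Example (illustrative): cpu_str_to_log_mask("in_asm,op") returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" ORs together every entry in
 * cpu_log_items[]. An unknown name makes the whole call return 0. */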
1769
1770 void cpu_abort(CPUState *env, const char *fmt, ...)
1771 {
1772 va_list ap;
1773 va_list ap2;
1774
1775 va_start(ap, fmt);
1776 va_copy(ap2, ap);
1777 fprintf(stderr, "qemu: fatal: ");
1778 vfprintf(stderr, fmt, ap);
1779 fprintf(stderr, "\n");
1780 #ifdef TARGET_I386
1781 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1782 #else
1783 cpu_dump_state(env, stderr, fprintf, 0);
1784 #endif
1785 if (qemu_log_enabled()) {
1786 qemu_log("qemu: fatal: ");
1787 qemu_log_vprintf(fmt, ap2);
1788 qemu_log("\n");
1789 #ifdef TARGET_I386
1790 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1791 #else
1792 log_cpu_state(env, 0);
1793 #endif
1794 qemu_log_flush();
1795 qemu_log_close();
1796 }
1797 va_end(ap2);
1798 va_end(ap);
1799 #if defined(CONFIG_USER_ONLY)
1800 {
1801 struct sigaction act;
1802 sigfillset(&act.sa_mask);
1803 act.sa_handler = SIG_DFL;
1804 sigaction(SIGABRT, &act, NULL);
1805 }
1806 #endif
1807 abort();
1808 }
1809
1810 CPUState *cpu_copy(CPUState *env)
1811 {
1812 CPUState *new_env = cpu_init(env->cpu_model_str);
1813 CPUState *next_cpu = new_env->next_cpu;
1814 int cpu_index = new_env->cpu_index;
1815 #if defined(TARGET_HAS_ICE)
1816 CPUBreakpoint *bp;
1817 CPUWatchpoint *wp;
1818 #endif
1819
1820 memcpy(new_env, env, sizeof(CPUState));
1821
1822 /* Preserve chaining and index. */
1823 new_env->next_cpu = next_cpu;
1824 new_env->cpu_index = cpu_index;
1825
1826 /* Clone all break/watchpoints.
1827 Note: Once we support ptrace with hw-debug register access, make sure
1828 BP_CPU break/watchpoints are handled correctly on clone. */
1829     QTAILQ_INIT(&new_env->breakpoints);
1830     QTAILQ_INIT(&new_env->watchpoints);
1831 #if defined(TARGET_HAS_ICE)
1832 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1833 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1834 }
1835 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1836 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1837 wp->flags, NULL);
1838 }
1839 #endif
1840
1841 return new_env;
1842 }
1843
1844 #if !defined(CONFIG_USER_ONLY)
1845
1846 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1847 {
1848 unsigned int i;
1849
1850 /* Discard jump cache entries for any tb which might potentially
1851 overlap the flushed page. */
1852 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1853 memset (&env->tb_jmp_cache[i], 0,
1854 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1855
1856 i = tb_jmp_cache_hash_page(addr);
1857 memset (&env->tb_jmp_cache[i], 0,
1858 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1859 }
1860
1861 static CPUTLBEntry s_cputlb_empty_entry = {
1862 .addr_read = -1,
1863 .addr_write = -1,
1864 .addr_code = -1,
1865 .addend = -1,
1866 };
1867
1868 /* NOTE:
1869 * If flush_global is true (the usual case), flush all tlb entries.
1870 * If flush_global is false, flush (at least) all tlb entries not
1871 * marked global.
1872 *
1873 * Since QEMU doesn't currently implement a global/not-global flag
1874 * for tlb entries, at the moment tlb_flush() will also flush all
1875 * tlb entries in the flush_global == false case. This is OK because
1876 * CPU architectures generally permit an implementation to drop
1877 * entries from the TLB at any time, so flushing more entries than
1878 * required is only an efficiency issue, not a correctness issue.
1879 */
1880 void tlb_flush(CPUState *env, int flush_global)
1881 {
1882 int i;
1883
1884 #if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886 #endif
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
1892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1895 }
1896 }
1897
1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1899
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
1902 tlb_flush_count++;
1903 }
1904
1905 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1906 {
1907 if (addr == (tlb_entry->addr_read &
1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1909 addr == (tlb_entry->addr_write &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_code &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1913 *tlb_entry = s_cputlb_empty_entry;
1914 }
1915 }
1916
1917 void tlb_flush_page(CPUState *env, target_ulong addr)
1918 {
1919 int i;
1920 int mmu_idx;
1921
1922 #if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1924 #endif
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927 #if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931 #endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
1938
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1943
1944 tlb_flush_jmp_cache(env, addr);
1945 }
1946
1947 /* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
1949 static void tlb_protect_code(ram_addr_t ram_addr)
1950 {
1951 cpu_physical_memory_reset_dirty(ram_addr,
1952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
1954 }
1955
1956 /* update the TLB so that writes in the physical page 'ram_addr' are no longer
1957    tested for self-modifying code */
1958 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1959 target_ulong vaddr)
1960 {
1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1962 }
1963
1964 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1965 unsigned long start, unsigned long length)
1966 {
1967 unsigned long addr;
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1970 if ((addr - start) < length) {
1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1972 }
1973 }
1974 }
1975
1976 /* Note: start and end must be within the same ram block. */
1977 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1978 int dirty_flags)
1979 {
1980 CPUState *env;
1981 unsigned long length, start1;
1982 int i;
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1991
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1995 /* Check that we don't span multiple blocks - this breaks the
1996 address comparisons below. */
1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1998 != (end - 1) - start) {
1999 abort();
2000 }
2001
2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
2009 }
2010 }
2011
2012 int cpu_physical_memory_set_dirty_tracking(int enable)
2013 {
2014 int ret = 0;
2015 in_migration = enable;
2016 return ret;
2017 }
2018
2019 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020 {
2021 ram_addr_t ram_addr;
2022 void *p;
2023
2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
2030 }
2031 }
2032 }
2033
2034 /* update the TLB according to the current state of the dirty bits */
2035 void cpu_tlb_update_dirty(CPUState *env)
2036 {
2037 int i;
2038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
2043 }
2044
2045 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2046 {
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
2049 }
2050
2051 /* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2054 {
2055 int i;
2056 int mmu_idx;
2057
2058 vaddr &= TARGET_PAGE_MASK;
2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2062 }
2063
2064 /* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068 {
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085 }
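
/* Worked example (illustrative, assuming 4 KB target pages): after a 2 MB
   page at 0x00200000 has been entered, tlb_flush_addr = 0x00200000 and
   tlb_flush_mask = 0xFFE00000. Adding a second 2 MB page at 0x00800000
   widens the mask until both addresses agree under it:
       0xFFE00000 -> 0xFFC00000 -> 0xFF800000 -> 0xFF000000
   leaving tlb_flush_addr = 0 and tlb_flush_mask = 0xFF000000, i.e. a
   tlb_flush_page() anywhere in the first 16 MB now forces a full flush. */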
2086
2087 static bool is_ram_rom(ram_addr_t pd)
2088 {
2089 pd &= ~TARGET_PAGE_MASK;
2090 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2091 }
2092
2093 static bool is_romd(ram_addr_t pd)
2094 {
2095 MemoryRegion *mr;
2096
2097 pd &= ~TARGET_PAGE_MASK;
2098 mr = io_mem_region[pd];
2099 return mr->rom_device && mr->readable;
2100 }
2101
2102 static bool is_ram_rom_romd(ram_addr_t pd)
2103 {
2104 return is_ram_rom(pd) || is_romd(pd);
2105 }
2106
2107 /* Add a new TLB entry. At most one entry for a given virtual address
2108 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2109 supplied size is only used by tlb_flush_page. */
2110 void tlb_set_page(CPUState *env, target_ulong vaddr,
2111 target_phys_addr_t paddr, int prot,
2112 int mmu_idx, target_ulong size)
2113 {
2114 PhysPageDesc p;
2115 unsigned long pd;
2116 unsigned int index;
2117 target_ulong address;
2118 target_ulong code_address;
2119 unsigned long addend;
2120 CPUTLBEntry *te;
2121 CPUWatchpoint *wp;
2122 target_phys_addr_t iotlb;
2123
2124 assert(size >= TARGET_PAGE_SIZE);
2125 if (size != TARGET_PAGE_SIZE) {
2126 tlb_add_large_page(env, vaddr, size);
2127 }
2128 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2129 pd = p.phys_offset;
2130 #if defined(DEBUG_TLB)
2131 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2132 " prot=%x idx=%d pd=0x%08lx\n",
2133 vaddr, paddr, prot, mmu_idx, pd);
2134 #endif
2135
2136 address = vaddr;
2137 if (!is_ram_rom_romd(pd)) {
2138 /* IO memory case (romd handled later) */
2139 address |= TLB_MMIO;
2140 }
2141 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2142 if (is_ram_rom(pd)) {
2143 /* Normal RAM. */
2144 iotlb = pd & TARGET_PAGE_MASK;
2145 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2146 iotlb |= io_mem_notdirty.ram_addr;
2147 else
2148 iotlb |= io_mem_rom.ram_addr;
2149 } else {
2150 /* IO handlers are currently passed a physical address.
2151 It would be nice to pass an offset from the base address
2152 of that region. This would avoid having to special case RAM,
2153 and avoid full address decoding in every device.
2154 We can't use the high bits of pd for this because
2155 IO_MEM_ROMD uses these as a ram address. */
2156 iotlb = (pd & ~TARGET_PAGE_MASK);
2157 iotlb += p.region_offset;
2158 }
2159
2160 code_address = address;
2161 /* Make accesses to pages with watchpoints go via the
2162 watchpoint trap routines. */
2163 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2164 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2165 /* Avoid trapping reads of pages with a write breakpoint. */
2166 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2167 iotlb = io_mem_watch.ram_addr + paddr;
2168 address |= TLB_MMIO;
2169 break;
2170 }
2171 }
2172 }
2173
2174 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2175 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2176 te = &env->tlb_table[mmu_idx][index];
2177 te->addend = addend - vaddr;
2178 if (prot & PAGE_READ) {
2179 te->addr_read = address;
2180 } else {
2181 te->addr_read = -1;
2182 }
2183
2184 if (prot & PAGE_EXEC) {
2185 te->addr_code = code_address;
2186 } else {
2187 te->addr_code = -1;
2188 }
2189 if (prot & PAGE_WRITE) {
2190 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2191 /* Write access calls the I/O callback. */
2192 te->addr_write = address | TLB_MMIO;
2193 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2194 !cpu_physical_memory_is_dirty(pd)) {
2195 te->addr_write = address | TLB_NOTDIRTY;
2196 } else {
2197 te->addr_write = address;
2198 }
2199 } else {
2200 te->addr_write = -1;
2201 }
2202 }
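
/* Typical caller (illustrative sketch, not tied to a particular target):
   a target's tlb_fill() handler walks the guest page tables and then
   installs the translation with something like

       prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;    /- taken from the PTE -/
       tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                    paddr & TARGET_PAGE_MASK, prot,
                    mmu_idx, TARGET_PAGE_SIZE);

   A size larger than TARGET_PAGE_SIZE only feeds the large-page tracking
   above; a single TARGET_PAGE_SIZE entry is still all that is installed. */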
2203
2204 #else
2205
2206 void tlb_flush(CPUState *env, int flush_global)
2207 {
2208 }
2209
2210 void tlb_flush_page(CPUState *env, target_ulong addr)
2211 {
2212 }
2213
2214 /*
2215 * Walks guest process memory "regions" one by one
2216 * and calls callback function 'fn' for each region.
2217 */
2218
2219 struct walk_memory_regions_data
2220 {
2221 walk_memory_regions_fn fn;
2222 void *priv;
2223 unsigned long start;
2224 int prot;
2225 };
2226
2227 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2228 abi_ulong end, int new_prot)
2229 {
2230 if (data->start != -1ul) {
2231 int rc = data->fn(data->priv, data->start, end, data->prot);
2232 if (rc != 0) {
2233 return rc;
2234 }
2235 }
2236
2237 data->start = (new_prot ? end : -1ul);
2238 data->prot = new_prot;
2239
2240 return 0;
2241 }
2242
2243 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2244 abi_ulong base, int level, void **lp)
2245 {
2246 abi_ulong pa;
2247 int i, rc;
2248
2249 if (*lp == NULL) {
2250 return walk_memory_regions_end(data, base, 0);
2251 }
2252
2253 if (level == 0) {
2254 PageDesc *pd = *lp;
2255 for (i = 0; i < L2_SIZE; ++i) {
2256 int prot = pd[i].flags;
2257
2258 pa = base | (i << TARGET_PAGE_BITS);
2259 if (prot != data->prot) {
2260 rc = walk_memory_regions_end(data, pa, prot);
2261 if (rc != 0) {
2262 return rc;
2263 }
2264 }
2265 }
2266 } else {
2267 void **pp = *lp;
2268 for (i = 0; i < L2_SIZE; ++i) {
2269 pa = base | ((abi_ulong)i <<
2270 (TARGET_PAGE_BITS + L2_BITS * level));
2271 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2272 if (rc != 0) {
2273 return rc;
2274 }
2275 }
2276 }
2277
2278 return 0;
2279 }
2280
2281 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2282 {
2283 struct walk_memory_regions_data data;
2284 unsigned long i;
2285
2286 data.fn = fn;
2287 data.priv = priv;
2288 data.start = -1ul;
2289 data.prot = 0;
2290
2291 for (i = 0; i < V_L1_SIZE; i++) {
2292 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2293 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2294 if (rc != 0) {
2295 return rc;
2296 }
2297 }
2298
2299 return walk_memory_regions_end(&data, 0, 0);
2300 }
2301
2302 static int dump_region(void *priv, abi_ulong start,
2303 abi_ulong end, unsigned long prot)
2304 {
2305 FILE *f = (FILE *)priv;
2306
2307 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2308 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2309 start, end, end - start,
2310 ((prot & PAGE_READ) ? 'r' : '-'),
2311 ((prot & PAGE_WRITE) ? 'w' : '-'),
2312 ((prot & PAGE_EXEC) ? 'x' : '-'));
2313
2314 return (0);
2315 }
2316
2317 /* dump memory mappings */
2318 void page_dump(FILE *f)
2319 {
2320 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2321 "start", "end", "size", "prot");
2322 walk_memory_regions(f, dump_region);
2323 }
2324
2325 int page_get_flags(target_ulong address)
2326 {
2327 PageDesc *p;
2328
2329 p = page_find(address >> TARGET_PAGE_BITS);
2330 if (!p)
2331 return 0;
2332 return p->flags;
2333 }
2334
2335 /* Modify the flags of a page and invalidate the code if necessary.
2336 The flag PAGE_WRITE_ORG is positioned automatically depending
2337 on PAGE_WRITE. The mmap_lock should already be held. */
2338 void page_set_flags(target_ulong start, target_ulong end, int flags)
2339 {
2340 target_ulong addr, len;
2341
2342 /* This function should never be called with addresses outside the
2343 guest address space. If this assert fires, it probably indicates
2344 a missing call to h2g_valid. */
2345 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2346 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2347 #endif
2348 assert(start < end);
2349
2350 start = start & TARGET_PAGE_MASK;
2351 end = TARGET_PAGE_ALIGN(end);
2352
2353 if (flags & PAGE_WRITE) {
2354 flags |= PAGE_WRITE_ORG;
2355 }
2356
2357 for (addr = start, len = end - start;
2358 len != 0;
2359 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2360 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2361
2362 /* If the write protection bit is set, then we invalidate
2363 the code inside. */
2364 if (!(p->flags & PAGE_WRITE) &&
2365 (flags & PAGE_WRITE) &&
2366 p->first_tb) {
2367 tb_invalidate_phys_page(addr, 0, NULL);
2368 }
2369 p->flags = flags;
2370 }
2371 }
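
/* Usage sketch (illustrative): the user-mode mmap emulation marks a freshly
   mapped guest range as accessible with something like

       page_set_flags(start, start + len, prot | PAGE_VALID);

   and clears it again on munmap with page_set_flags(start, start + len, 0);
   the actual call sites live in the linux-user code. */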
2372
2373 int page_check_range(target_ulong start, target_ulong len, int flags)
2374 {
2375 PageDesc *p;
2376 target_ulong end;
2377 target_ulong addr;
2378
2379 /* This function should never be called with addresses outside the
2380 guest address space. If this assert fires, it probably indicates
2381 a missing call to h2g_valid. */
2382 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2383 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2384 #endif
2385
2386 if (len == 0) {
2387 return 0;
2388 }
2389 if (start + len - 1 < start) {
2390 /* We've wrapped around. */
2391 return -1;
2392 }
2393
2394 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2395 start = start & TARGET_PAGE_MASK;
2396
2397 for (addr = start, len = end - start;
2398 len != 0;
2399 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2400 p = page_find(addr >> TARGET_PAGE_BITS);
2401 if (!p)
2402 return -1;
2403 if (!(p->flags & PAGE_VALID))
2404 return -1;
2405
2406 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2407 return -1;
2408 if (flags & PAGE_WRITE) {
2409 if (!(p->flags & PAGE_WRITE_ORG))
2410 return -1;
2411 /* unprotect the page if it was put read-only because it
2412 contains translated code */
2413 if (!(p->flags & PAGE_WRITE)) {
2414 if (!page_unprotect(addr, 0, NULL))
2415 return -1;
2416 }
2417 return 0;
2418 }
2419 }
2420 return 0;
2421 }
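
/* Usage sketch (illustrative): user-mode callers validate a guest buffer
   before copying data in or out, e.g.

       if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
           return -TARGET_EFAULT;
       }

   Asking for PAGE_WRITE additionally unprotects pages that were made
   read-only because they contain translated code. */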
2422
2423 /* called from signal handler: invalidate the code and unprotect the
2424 page. Return TRUE if the fault was successfully handled. */
2425 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2426 {
2427 unsigned int prot;
2428 PageDesc *p;
2429 target_ulong host_start, host_end, addr;
2430
2431 /* Technically this isn't safe inside a signal handler. However we
2432 know this only ever happens in a synchronous SEGV handler, so in
2433 practice it seems to be ok. */
2434 mmap_lock();
2435
2436 p = page_find(address >> TARGET_PAGE_BITS);
2437 if (!p) {
2438 mmap_unlock();
2439 return 0;
2440 }
2441
2442 /* if the page was really writable, then we change its
2443 protection back to writable */
2444 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2445 host_start = address & qemu_host_page_mask;
2446 host_end = host_start + qemu_host_page_size;
2447
2448 prot = 0;
2449 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2450 p = page_find(addr >> TARGET_PAGE_BITS);
2451 p->flags |= PAGE_WRITE;
2452 prot |= p->flags;
2453
2454 /* and since the content will be modified, we must invalidate
2455 the corresponding translated code. */
2456 tb_invalidate_phys_page(addr, pc, puc);
2457 #ifdef DEBUG_TB_CHECK
2458 tb_invalidate_check(addr);
2459 #endif
2460 }
2461 mprotect((void *)g2h(host_start), qemu_host_page_size,
2462 prot & PAGE_BITS);
2463
2464 mmap_unlock();
2465 return 1;
2466 }
2467 mmap_unlock();
2468 return 0;
2469 }
2470
2471 static inline void tlb_set_dirty(CPUState *env,
2472 unsigned long addr, target_ulong vaddr)
2473 {
2474 }
2475 #endif /* defined(CONFIG_USER_ONLY) */
2476
2477 #if !defined(CONFIG_USER_ONLY)
2478
2479 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2480 typedef struct subpage_t {
2481 MemoryRegion iomem;
2482 target_phys_addr_t base;
2483 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2484 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2485 } subpage_t;
2486
2487 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2488 ram_addr_t memory, ram_addr_t region_offset);
2489 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2490 ram_addr_t orig_memory,
2491 ram_addr_t region_offset);
2492 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2493 need_subpage) \
2494 do { \
2495 if (addr > start_addr) \
2496 start_addr2 = 0; \
2497 else { \
2498 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2499 if (start_addr2 > 0) \
2500 need_subpage = 1; \
2501 } \
2502 \
2503 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2504 end_addr2 = TARGET_PAGE_SIZE - 1; \
2505 else { \
2506 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2507 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2508 need_subpage = 1; \
2509 } \
2510 } while (0)
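
/* Example (illustrative, 4 KB pages): registering a region with
   start_addr = 0x10800 and orig_size = 0x400, evaluated for the page at
   addr = 0x10000, yields start_addr2 = 0x800, end_addr2 = 0xBFF and
   need_subpage = 1: only bytes [0x800, 0xBFF] of that page belong to the
   new region, so a subpage is required. A page that is fully covered
   leaves need_subpage at 0. */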
2511
2512 static void destroy_page_desc(PhysPageDesc pd)
2513 {
2514 unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
2515 MemoryRegion *mr = io_mem_region[io_index];
2516
2517 if (mr->subpage) {
2518 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2519 memory_region_destroy(&subpage->iomem);
2520 g_free(subpage);
2521 }
2522 }
2523
2524 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
2525 {
2526 unsigned i;
2527 PhysPageEntry *p = lp->u.node;
2528
2529 if (!p) {
2530 return;
2531 }
2532
2533 for (i = 0; i < L2_SIZE; ++i) {
2534 if (level > 0) {
2535 destroy_l2_mapping(&p[i], level - 1);
2536 } else {
2537 destroy_page_desc(p[i].u.leaf);
2538 }
2539 }
2540 g_free(p);
2541 lp->u.node = NULL;
2542 }
2543
2544 static void destroy_all_mappings(void)
2545 {
2546 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2547 }
2548
2549 /* register physical memory.
2550 For RAM, 'size' must be a multiple of the target page size.
2551 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2552 io memory page. The address used when calling the IO function is
2553 the offset from the start of the region, plus region_offset. Both
2554 start_addr and region_offset are rounded down to a page boundary
2555 before calculating this offset. This should not be a problem unless
2556 the low bits of start_addr and region_offset differ. */
2557 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2558 bool readonly)
2559 {
2560 target_phys_addr_t start_addr = section->offset_within_address_space;
2561 ram_addr_t size = section->size;
2562 ram_addr_t phys_offset = section->mr->ram_addr;
2563 ram_addr_t region_offset = section->offset_within_region;
2564 target_phys_addr_t addr, end_addr;
2565 PhysPageDesc *p;
2566 CPUState *env;
2567 ram_addr_t orig_size = size;
2568 subpage_t *subpage;
2569
2570 if (memory_region_is_ram(section->mr)) {
2571 phys_offset += region_offset;
2572 region_offset = 0;
2573 }
2574
2575 if (readonly) {
2576 phys_offset |= io_mem_rom.ram_addr;
2577 }
2578
2579 assert(size);
2580
2581 if (phys_offset == io_mem_unassigned.ram_addr) {
2582 region_offset = start_addr;
2583 }
2584 region_offset &= TARGET_PAGE_MASK;
2585 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2586 end_addr = start_addr + (target_phys_addr_t)size;
2587
2588 addr = start_addr;
2589 do {
2590 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2591 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2592 ram_addr_t orig_memory = p->phys_offset;
2593 target_phys_addr_t start_addr2, end_addr2;
2594 int need_subpage = 0;
2595 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
2596
2597 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2598 need_subpage);
2599 if (need_subpage) {
2600 if (!(mr->subpage)) {
2601 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2602 &p->phys_offset, orig_memory,
2603 p->region_offset);
2604 } else {
2605 subpage = container_of(mr, subpage_t, iomem);
2606 }
2607 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2608 region_offset);
2609 p->region_offset = 0;
2610 } else {
2611 p->phys_offset = phys_offset;
2612 p->region_offset = region_offset;
2613 if (is_ram_rom_romd(phys_offset))
2614 phys_offset += TARGET_PAGE_SIZE;
2615 }
2616 } else {
2617 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2618 p->phys_offset = phys_offset;
2619 p->region_offset = region_offset;
2620 if (is_ram_rom_romd(phys_offset)) {
2621 phys_offset += TARGET_PAGE_SIZE;
2622 } else {
2623 target_phys_addr_t start_addr2, end_addr2;
2624 int need_subpage = 0;
2625
2626 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2627 end_addr2, need_subpage);
2628
2629 if (need_subpage) {
2630 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2631 &p->phys_offset,
2632 io_mem_unassigned.ram_addr,
2633 addr & TARGET_PAGE_MASK);
2634 subpage_register(subpage, start_addr2, end_addr2,
2635 phys_offset, region_offset);
2636 p->region_offset = 0;
2637 }
2638 }
2639 }
2640 region_offset += TARGET_PAGE_SIZE;
2641 addr += TARGET_PAGE_SIZE;
2642 } while (addr != end_addr);
2643
2644 /* since each CPU stores ram addresses in its TLB cache, we must
2645 reset the modified entries */
2646 /* XXX: slow ! */
2647 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2648 tlb_flush(env, 1);
2649 }
2650 }
2651
2652 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2653 {
2654 if (kvm_enabled())
2655 kvm_coalesce_mmio_region(addr, size);
2656 }
2657
2658 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2659 {
2660 if (kvm_enabled())
2661 kvm_uncoalesce_mmio_region(addr, size);
2662 }
2663
2664 void qemu_flush_coalesced_mmio_buffer(void)
2665 {
2666 if (kvm_enabled())
2667 kvm_flush_coalesced_mmio_buffer();
2668 }
2669
2670 #if defined(__linux__) && !defined(TARGET_S390X)
2671
2672 #include <sys/vfs.h>
2673
2674 #define HUGETLBFS_MAGIC 0x958458f6
2675
2676 static long gethugepagesize(const char *path)
2677 {
2678 struct statfs fs;
2679 int ret;
2680
2681 do {
2682 ret = statfs(path, &fs);
2683 } while (ret != 0 && errno == EINTR);
2684
2685 if (ret != 0) {
2686 perror(path);
2687 return 0;
2688 }
2689
2690 if (fs.f_type != HUGETLBFS_MAGIC)
2691 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2692
2693 return fs.f_bsize;
2694 }
2695
2696 static void *file_ram_alloc(RAMBlock *block,
2697 ram_addr_t memory,
2698 const char *path)
2699 {
2700 char *filename;
2701 void *area;
2702 int fd;
2703 #ifdef MAP_POPULATE
2704 int flags;
2705 #endif
2706 unsigned long hpagesize;
2707
2708 hpagesize = gethugepagesize(path);
2709 if (!hpagesize) {
2710 return NULL;
2711 }
2712
2713 if (memory < hpagesize) {
2714 return NULL;
2715 }
2716
2717 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2718 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2719 return NULL;
2720 }
2721
2722 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2723 return NULL;
2724 }
2725
2726 fd = mkstemp(filename);
2727 if (fd < 0) {
2728 perror("unable to create backing store for hugepages");
2729 free(filename);
2730 return NULL;
2731 }
2732 unlink(filename);
2733 free(filename);
2734
2735 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2736
2737 /*
2738 * ftruncate is not supported by hugetlbfs in older
2739 * hosts, so don't bother bailing out on errors.
2740 * If anything goes wrong with it under other filesystems,
2741 * mmap will fail.
2742 */
2743 if (ftruncate(fd, memory))
2744 perror("ftruncate");
2745
2746 #ifdef MAP_POPULATE
2747 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2748 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2749 * to sidestep this quirk.
2750 */
2751 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2752 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2753 #else
2754 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2755 #endif
2756 if (area == MAP_FAILED) {
2757 perror("file_ram_alloc: can't mmap RAM pages");
2758 close(fd);
2759 return (NULL);
2760 }
2761 block->fd = fd;
2762 return area;
2763 }
2764 #endif
2765
2766 static ram_addr_t find_ram_offset(ram_addr_t size)
2767 {
2768 RAMBlock *block, *next_block;
2769 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2770
2771 if (QLIST_EMPTY(&ram_list.blocks))
2772 return 0;
2773
2774 QLIST_FOREACH(block, &ram_list.blocks, next) {
2775 ram_addr_t end, next = RAM_ADDR_MAX;
2776
2777 end = block->offset + block->length;
2778
2779 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2780 if (next_block->offset >= end) {
2781 next = MIN(next, next_block->offset);
2782 }
2783 }
2784 if (next - end >= size && next - end < mingap) {
2785 offset = end;
2786 mingap = next - end;
2787 }
2788 }
2789
2790 if (offset == RAM_ADDR_MAX) {
2791 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2792 (uint64_t)size);
2793 abort();
2794 }
2795
2796 return offset;
2797 }
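
/* Example (illustrative): with existing blocks [0, 1 MB) and [3 MB, 4 MB),
   a request for 1 MB returns offset 1 MB: the 2 MB hole between the blocks
   is the smallest gap that still fits, while the unbounded space above the
   last block is kept for larger allocations. */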
2798
2799 static ram_addr_t last_ram_offset(void)
2800 {
2801 RAMBlock *block;
2802 ram_addr_t last = 0;
2803
2804 QLIST_FOREACH(block, &ram_list.blocks, next)
2805 last = MAX(last, block->offset + block->length);
2806
2807 return last;
2808 }
2809
2810 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2811 {
2812 RAMBlock *new_block, *block;
2813
2814 new_block = NULL;
2815 QLIST_FOREACH(block, &ram_list.blocks, next) {
2816 if (block->offset == addr) {
2817 new_block = block;
2818 break;
2819 }
2820 }
2821 assert(new_block);
2822 assert(!new_block->idstr[0]);
2823
2824 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2825 char *id = dev->parent_bus->info->get_dev_path(dev);
2826 if (id) {
2827 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2828 g_free(id);
2829 }
2830 }
2831 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2832
2833 QLIST_FOREACH(block, &ram_list.blocks, next) {
2834 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2835 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2836 new_block->idstr);
2837 abort();
2838 }
2839 }
2840 }
2841
2842 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2843 MemoryRegion *mr)
2844 {
2845 RAMBlock *new_block;
2846
2847 size = TARGET_PAGE_ALIGN(size);
2848 new_block = g_malloc0(sizeof(*new_block));
2849
2850 new_block->mr = mr;
2851 new_block->offset = find_ram_offset(size);
2852 if (host) {
2853 new_block->host = host;
2854 new_block->flags |= RAM_PREALLOC_MASK;
2855 } else {
2856 if (mem_path) {
2857 #if defined (__linux__) && !defined(TARGET_S390X)
2858 new_block->host = file_ram_alloc(new_block, size, mem_path);
2859 if (!new_block->host) {
2860 new_block->host = qemu_vmalloc(size);
2861 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2862 }
2863 #else
2864 fprintf(stderr, "-mem-path option unsupported\n");
2865 exit(1);
2866 #endif
2867 } else {
2868 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2869 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2870 a system-defined value, which is at least 256GB. Larger systems
2871 have larger values. We put the guest between the end of data
2872 segment (system break) and this value. We use 32GB as a base to
2873 have enough room for the system break to grow. */
2874 new_block->host = mmap((void*)0x800000000, size,
2875 PROT_EXEC|PROT_READ|PROT_WRITE,
2876 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2877 if (new_block->host == MAP_FAILED) {
2878 fprintf(stderr, "Allocating RAM failed\n");
2879 abort();
2880 }
2881 #else
2882 if (xen_enabled()) {
2883 xen_ram_alloc(new_block->offset, size, mr);
2884 } else {
2885 new_block->host = qemu_vmalloc(size);
2886 }
2887 #endif
2888 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2889 }
2890 }
2891 new_block->length = size;
2892
2893 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2894
2895 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2896 last_ram_offset() >> TARGET_PAGE_BITS);
2897 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2898 0xff, size >> TARGET_PAGE_BITS);
2899
2900 if (kvm_enabled())
2901 kvm_setup_guest_memory(new_block->host, size);
2902
2903 return new_block->offset;
2904 }
2905
2906 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2907 {
2908 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2909 }
2910
2911 void qemu_ram_free_from_ptr(ram_addr_t addr)
2912 {
2913 RAMBlock *block;
2914
2915 QLIST_FOREACH(block, &ram_list.blocks, next) {
2916 if (addr == block->offset) {
2917 QLIST_REMOVE(block, next);
2918 g_free(block);
2919 return;
2920 }
2921 }
2922 }
2923
2924 void qemu_ram_free(ram_addr_t addr)
2925 {
2926 RAMBlock *block;
2927
2928 QLIST_FOREACH(block, &ram_list.blocks, next) {
2929 if (addr == block->offset) {
2930 QLIST_REMOVE(block, next);
2931 if (block->flags & RAM_PREALLOC_MASK) {
2932 ;
2933 } else if (mem_path) {
2934 #if defined (__linux__) && !defined(TARGET_S390X)
2935 if (block->fd) {
2936 munmap(block->host, block->length);
2937 close(block->fd);
2938 } else {
2939 qemu_vfree(block->host);
2940 }
2941 #else
2942 abort();
2943 #endif
2944 } else {
2945 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2946 munmap(block->host, block->length);
2947 #else
2948 if (xen_enabled()) {
2949 xen_invalidate_map_cache_entry(block->host);
2950 } else {
2951 qemu_vfree(block->host);
2952 }
2953 #endif
2954 }
2955 g_free(block);
2956 return;
2957 }
2958 }
2959
2960 }
2961
2962 #ifndef _WIN32
2963 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2964 {
2965 RAMBlock *block;
2966 ram_addr_t offset;
2967 int flags;
2968 void *area, *vaddr;
2969
2970 QLIST_FOREACH(block, &ram_list.blocks, next) {
2971 offset = addr - block->offset;
2972 if (offset < block->length) {
2973 vaddr = block->host + offset;
2974 if (block->flags & RAM_PREALLOC_MASK) {
2975 ;
2976 } else {
2977 flags = MAP_FIXED;
2978 munmap(vaddr, length);
2979 if (mem_path) {
2980 #if defined(__linux__) && !defined(TARGET_S390X)
2981 if (block->fd) {
2982 #ifdef MAP_POPULATE
2983 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2984 MAP_PRIVATE;
2985 #else
2986 flags |= MAP_PRIVATE;
2987 #endif
2988 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2989 flags, block->fd, offset);
2990 } else {
2991 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2992 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2993 flags, -1, 0);
2994 }
2995 #else
2996 abort();
2997 #endif
2998 } else {
2999 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3000 flags |= MAP_SHARED | MAP_ANONYMOUS;
3001 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3002 flags, -1, 0);
3003 #else
3004 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3005 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3006 flags, -1, 0);
3007 #endif
3008 }
3009 if (area != vaddr) {
3010 fprintf(stderr, "Could not remap addr: "
3011 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3012 length, addr);
3013 exit(1);
3014 }
3015 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3016 }
3017 return;
3018 }
3019 }
3020 }
3021 #endif /* !_WIN32 */
3022
3023 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3024 With the exception of the softmmu code in this file, this should
3025 only be used for local memory (e.g. video ram) that the device owns,
3026 and knows it isn't going to access beyond the end of the block.
3027
3028 It should not be used for general purpose DMA.
3029 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3030 */
3031 void *qemu_get_ram_ptr(ram_addr_t addr)
3032 {
3033 RAMBlock *block;
3034
3035 QLIST_FOREACH(block, &ram_list.blocks, next) {
3036 if (addr - block->offset < block->length) {
3037 /* Move this entry to the start of the list. */
3038 if (block != QLIST_FIRST(&ram_list.blocks)) {
3039 QLIST_REMOVE(block, next);
3040 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3041 }
3042 if (xen_enabled()) {
3043 /* We need to check if the requested address is in the RAM
3044 * because we don't want to map the entire memory in QEMU.
3045 * In that case just map until the end of the page.
3046 */
3047 if (block->offset == 0) {
3048 return xen_map_cache(addr, 0, 0);
3049 } else if (block->host == NULL) {
3050 block->host =
3051 xen_map_cache(block->offset, block->length, 1);
3052 }
3053 }
3054 return block->host + (addr - block->offset);
3055 }
3056 }
3057
3058 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3059 abort();
3060
3061 return NULL;
3062 }
3063
3064 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3065 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3066 */
3067 void *qemu_safe_ram_ptr(ram_addr_t addr)
3068 {
3069 RAMBlock *block;
3070
3071 QLIST_FOREACH(block, &ram_list.blocks, next) {
3072 if (addr - block->offset < block->length) {
3073 if (xen_enabled()) {
3074 /* We need to check if the requested address is in the RAM
3075 * because we don't want to map the entire memory in QEMU.
3076 * In that case just map until the end of the page.
3077 */
3078 if (block->offset == 0) {
3079 return xen_map_cache(addr, 0, 0);
3080 } else if (block->host == NULL) {
3081 block->host =
3082 xen_map_cache(block->offset, block->length, 1);
3083 }
3084 }
3085 return block->host + (addr - block->offset);
3086 }
3087 }
3088
3089 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3090 abort();
3091
3092 return NULL;
3093 }
3094
3095 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3096 * but takes a size argument */
3097 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3098 {
3099 if (*size == 0) {
3100 return NULL;
3101 }
3102 if (xen_enabled()) {
3103 return xen_map_cache(addr, *size, 1);
3104 } else {
3105 RAMBlock *block;
3106
3107 QLIST_FOREACH(block, &ram_list.blocks, next) {
3108 if (addr - block->offset < block->length) {
3109 if (addr - block->offset + *size > block->length)
3110 *size = block->length - addr + block->offset;
3111 return block->host + (addr - block->offset);
3112 }
3113 }
3114
3115 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3116 abort();
3117 }
3118 }
3119
3120 void qemu_put_ram_ptr(void *addr)
3121 {
3122 trace_qemu_put_ram_ptr(addr);
3123 }
3124
3125 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3126 {
3127 RAMBlock *block;
3128 uint8_t *host = ptr;
3129
3130 if (xen_enabled()) {
3131 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3132 return 0;
3133 }
3134
3135 QLIST_FOREACH(block, &ram_list.blocks, next) {
3136 /* This can happen when the block is not mapped. */
3137 if (block->host == NULL) {
3138 continue;
3139 }
3140 if (host - block->host < block->length) {
3141 *ram_addr = block->offset + (host - block->host);
3142 return 0;
3143 }
3144 }
3145
3146 return -1;
3147 }
3148
3149 /* Some of the softmmu routines need to translate from a host pointer
3150 (typically a TLB entry) back to a ram offset. */
3151 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3152 {
3153 ram_addr_t ram_addr;
3154
3155 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3156 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3157 abort();
3158 }
3159 return ram_addr;
3160 }
3161
3162 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3163 unsigned size)
3164 {
3165 #ifdef DEBUG_UNASSIGNED
3166 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3167 #endif
3168 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3169 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3170 #endif
3171 return 0;
3172 }
3173
3174 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3175 uint64_t val, unsigned size)
3176 {
3177 #ifdef DEBUG_UNASSIGNED
3178 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3179 #endif
3180 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3181 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3182 #endif
3183 }
3184
3185 static const MemoryRegionOps unassigned_mem_ops = {
3186 .read = unassigned_mem_read,
3187 .write = unassigned_mem_write,
3188 .endianness = DEVICE_NATIVE_ENDIAN,
3189 };
3190
3191 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3192 unsigned size)
3193 {
3194 abort();
3195 }
3196
3197 static void error_mem_write(void *opaque, target_phys_addr_t addr,
3198 uint64_t value, unsigned size)
3199 {
3200 abort();
3201 }
3202
3203 static const MemoryRegionOps error_mem_ops = {
3204 .read = error_mem_read,
3205 .write = error_mem_write,
3206 .endianness = DEVICE_NATIVE_ENDIAN,
3207 };
3208
3209 static const MemoryRegionOps rom_mem_ops = {
3210 .read = error_mem_read,
3211 .write = unassigned_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
3213 };
3214
3215 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3216 uint64_t val, unsigned size)
3217 {
3218 int dirty_flags;
3219 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3220 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3221 #if !defined(CONFIG_USER_ONLY)
3222 tb_invalidate_phys_page_fast(ram_addr, size);
3223 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3224 #endif
3225 }
3226 switch (size) {
3227 case 1:
3228 stb_p(qemu_get_ram_ptr(ram_addr), val);
3229 break;
3230 case 2:
3231 stw_p(qemu_get_ram_ptr(ram_addr), val);
3232 break;
3233 case 4:
3234 stl_p(qemu_get_ram_ptr(ram_addr), val);
3235 break;
3236 default:
3237 abort();
3238 }
3239 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3240 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3241 /* we remove the notdirty callback only if the code has been
3242 flushed */
3243 if (dirty_flags == 0xff)
3244 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3245 }
3246
3247 static const MemoryRegionOps notdirty_mem_ops = {
3248 .read = error_mem_read,
3249 .write = notdirty_mem_write,
3250 .endianness = DEVICE_NATIVE_ENDIAN,
3251 };
3252
3253 /* Generate a debug exception if a watchpoint has been hit. */
3254 static void check_watchpoint(int offset, int len_mask, int flags)
3255 {
3256 CPUState *env = cpu_single_env;
3257 target_ulong pc, cs_base;
3258 TranslationBlock *tb;
3259 target_ulong vaddr;
3260 CPUWatchpoint *wp;
3261 int cpu_flags;
3262
3263 if (env->watchpoint_hit) {
3264 /* We re-entered the check after replacing the TB. Now raise
3265 * the debug interrupt so that it will trigger after the
3266 * current instruction. */
3267 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3268 return;
3269 }
3270 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3271 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3272 if ((vaddr == (wp->vaddr & len_mask) ||
3273 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3274 wp->flags |= BP_WATCHPOINT_HIT;
3275 if (!env->watchpoint_hit) {
3276 env->watchpoint_hit = wp;
3277 tb = tb_find_pc(env->mem_io_pc);
3278 if (!tb) {
3279 cpu_abort(env, "check_watchpoint: could not find TB for "
3280 "pc=%p", (void *)env->mem_io_pc);
3281 }
3282 cpu_restore_state(tb, env, env->mem_io_pc);
3283 tb_phys_invalidate(tb, -1);
3284 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3285 env->exception_index = EXCP_DEBUG;
3286 } else {
3287 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3288 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3289 }
3290 cpu_resume_from_signal(env, NULL);
3291 }
3292 } else {
3293 wp->flags &= ~BP_WATCHPOINT_HIT;
3294 }
3295 }
3296 }
3297
3298 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3299 so these check for a hit then pass through to the normal out-of-line
3300 phys routines. */
3301 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3302 unsigned size)
3303 {
3304 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3305 switch (size) {
3306 case 1: return ldub_phys(addr);
3307 case 2: return lduw_phys(addr);
3308 case 4: return ldl_phys(addr);
3309 default: abort();
3310 }
3311 }
3312
3313 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3314 uint64_t val, unsigned size)
3315 {
3316 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3317 switch (size) {
3318 case 1: stb_phys(addr, val); break;
3319 case 2: stw_phys(addr, val); break;
3320 case 4: stl_phys(addr, val); break;
3321 default: abort();
3322 }
3323 }
3324
3325 static const MemoryRegionOps watch_mem_ops = {
3326 .read = watch_mem_read,
3327 .write = watch_mem_write,
3328 .endianness = DEVICE_NATIVE_ENDIAN,
3329 };
3330
3331 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3332 unsigned len)
3333 {
3334 subpage_t *mmio = opaque;
3335 unsigned int idx = SUBPAGE_IDX(addr);
3336 #if defined(DEBUG_SUBPAGE)
3337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3338 mmio, len, addr, idx);
3339 #endif
3340
3341 addr += mmio->region_offset[idx];
3342 idx = mmio->sub_io_index[idx];
3343 return io_mem_read(idx, addr, len);
3344 }
3345
3346 static void subpage_write(void *opaque, target_phys_addr_t addr,
3347 uint64_t value, unsigned len)
3348 {
3349 subpage_t *mmio = opaque;
3350 unsigned int idx = SUBPAGE_IDX(addr);
3351 #if defined(DEBUG_SUBPAGE)
3352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3353 " idx %d value %"PRIx64"\n",
3354 __func__, mmio, len, addr, idx, value);
3355 #endif
3356
3357 addr += mmio->region_offset[idx];
3358 idx = mmio->sub_io_index[idx];
3359 io_mem_write(idx, addr, value, len);
3360 }
3361
3362 static const MemoryRegionOps subpage_ops = {
3363 .read = subpage_read,
3364 .write = subpage_write,
3365 .endianness = DEVICE_NATIVE_ENDIAN,
3366 };
3367
3368 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3369 unsigned size)
3370 {
3371 ram_addr_t raddr = addr;
3372 void *ptr = qemu_get_ram_ptr(raddr);
3373 switch (size) {
3374 case 1: return ldub_p(ptr);
3375 case 2: return lduw_p(ptr);
3376 case 4: return ldl_p(ptr);
3377 default: abort();
3378 }
3379 }
3380
3381 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3382 uint64_t value, unsigned size)
3383 {
3384 ram_addr_t raddr = addr;
3385 void *ptr = qemu_get_ram_ptr(raddr);
3386 switch (size) {
3387 case 1: return stb_p(ptr, value);
3388 case 2: return stw_p(ptr, value);
3389 case 4: return stl_p(ptr, value);
3390 default: abort();
3391 }
3392 }
3393
3394 static const MemoryRegionOps subpage_ram_ops = {
3395 .read = subpage_ram_read,
3396 .write = subpage_ram_write,
3397 .endianness = DEVICE_NATIVE_ENDIAN,
3398 };
3399
3400 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3401 ram_addr_t memory, ram_addr_t region_offset)
3402 {
3403 int idx, eidx;
3404
3405 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3406 return -1;
3407 idx = SUBPAGE_IDX(start);
3408 eidx = SUBPAGE_IDX(end);
3409 #if defined(DEBUG_SUBPAGE)
3410 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3411 mmio, start, end, idx, eidx, memory);
3412 #endif
3413 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
3414 memory = io_mem_subpage_ram.ram_addr;
3415 }
3416 memory &= IO_MEM_NB_ENTRIES - 1;
3417 for (; idx <= eidx; idx++) {
3418 mmio->sub_io_index[idx] = memory;
3419 mmio->region_offset[idx] = region_offset;
3420 }
3421
3422 return 0;
3423 }
3424
3425 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3426 ram_addr_t orig_memory,
3427 ram_addr_t region_offset)
3428 {
3429 subpage_t *mmio;
3430 int subpage_memory;
3431
3432 mmio = g_malloc0(sizeof(subpage_t));
3433
3434 mmio->base = base;
3435 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3436 "subpage", TARGET_PAGE_SIZE);
3437 mmio->iomem.subpage = true;
3438 subpage_memory = mmio->iomem.ram_addr;
3439 #if defined(DEBUG_SUBPAGE)
3440 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3441 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3442 #endif
3443 *phys = subpage_memory;
3444 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3445
3446 return mmio;
3447 }
3448
3449 static int get_free_io_mem_idx(void)
3450 {
3451 int i;
3452
3453 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3454 if (!io_mem_used[i]) {
3455 io_mem_used[i] = 1;
3456 return i;
3457 }
3458 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3459 return -1;
3460 }
3461
3462 /* Associate the MemoryRegion 'mr' with an I/O slot index.
3463 If io_index is non-zero, the corresponding io zone is
3464 modified. If it is zero, a new io zone is allocated. The return
3465 value can be used with cpu_register_physical_memory(); -1 is
3466 returned on error. */
3469 static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3470 {
3471 if (io_index <= 0) {
3472 io_index = get_free_io_mem_idx();
3473 if (io_index == -1)
3474 return io_index;
3475 } else {
3476 if (io_index >= IO_MEM_NB_ENTRIES)
3477 return -1;
3478 }
3479
3480 io_mem_region[io_index] = mr;
3481
3482 return io_index;
3483 }
3484
3485 int cpu_register_io_memory(MemoryRegion *mr)
3486 {
3487 return cpu_register_io_memory_fixed(0, mr);
3488 }
3489
3490 void cpu_unregister_io_memory(int io_index)
3491 {
3492 io_mem_region[io_index] = NULL;
3493 io_mem_used[io_index] = 0;
3494 }
3495
3496 static void io_mem_init(void)
3497 {
3498 int i;
3499
3500 /* Must be first: */
3501 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3502 assert(io_mem_ram.ram_addr == 0);
3503 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3504 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3505 "unassigned", UINT64_MAX);
3506 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3507 "notdirty", UINT64_MAX);
3508 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3509 "subpage-ram", UINT64_MAX);
3510 for (i=0; i<5; i++)
3511 io_mem_used[i] = 1;
3512
3513 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3514 "watch", UINT64_MAX);
3515 }
3516
3517 static void core_begin(MemoryListener *listener)
3518 {
3519 destroy_all_mappings();
3520 }
3521
3522 static void core_commit(MemoryListener *listener)
3523 {
3524 }
3525
3526 static void core_region_add(MemoryListener *listener,
3527 MemoryRegionSection *section)
3528 {
3529 cpu_register_physical_memory_log(section, section->readonly);
3530 }
3531
3532 static void core_region_del(MemoryListener *listener,
3533 MemoryRegionSection *section)
3534 {
3535 }
3536
3537 static void core_region_nop(MemoryListener *listener,
3538 MemoryRegionSection *section)
3539 {
3540 cpu_register_physical_memory_log(section, section->readonly);
3541 }
3542
3543 static void core_log_start(MemoryListener *listener,
3544 MemoryRegionSection *section)
3545 {
3546 }
3547
3548 static void core_log_stop(MemoryListener *listener,
3549 MemoryRegionSection *section)
3550 {
3551 }
3552
3553 static void core_log_sync(MemoryListener *listener,
3554 MemoryRegionSection *section)
3555 {
3556 }
3557
3558 static void core_log_global_start(MemoryListener *listener)
3559 {
3560 cpu_physical_memory_set_dirty_tracking(1);
3561 }
3562
3563 static void core_log_global_stop(MemoryListener *listener)
3564 {
3565 cpu_physical_memory_set_dirty_tracking(0);
3566 }
3567
3568 static void core_eventfd_add(MemoryListener *listener,
3569 MemoryRegionSection *section,
3570 bool match_data, uint64_t data, int fd)
3571 {
3572 }
3573
3574 static void core_eventfd_del(MemoryListener *listener,
3575 MemoryRegionSection *section,
3576 bool match_data, uint64_t data, int fd)
3577 {
3578 }
3579
3580 static void io_begin(MemoryListener *listener)
3581 {
3582 }
3583
3584 static void io_commit(MemoryListener *listener)
3585 {
3586 }
3587
3588 static void io_region_add(MemoryListener *listener,
3589 MemoryRegionSection *section)
3590 {
3591 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3592 section->offset_within_address_space, section->size);
3593 ioport_register(&section->mr->iorange);
3594 }
3595
3596 static void io_region_del(MemoryListener *listener,
3597 MemoryRegionSection *section)
3598 {
3599 isa_unassign_ioport(section->offset_within_address_space, section->size);
3600 }
3601
3602 static void io_region_nop(MemoryListener *listener,
3603 MemoryRegionSection *section)
3604 {
3605 }
3606
3607 static void io_log_start(MemoryListener *listener,
3608 MemoryRegionSection *section)
3609 {
3610 }
3611
3612 static void io_log_stop(MemoryListener *listener,
3613 MemoryRegionSection *section)
3614 {
3615 }
3616
3617 static void io_log_sync(MemoryListener *listener,
3618 MemoryRegionSection *section)
3619 {
3620 }
3621
3622 static void io_log_global_start(MemoryListener *listener)
3623 {
3624 }
3625
3626 static void io_log_global_stop(MemoryListener *listener)
3627 {
3628 }
3629
3630 static void io_eventfd_add(MemoryListener *listener,
3631 MemoryRegionSection *section,
3632 bool match_data, uint64_t data, int fd)
3633 {
3634 }
3635
3636 static void io_eventfd_del(MemoryListener *listener,
3637 MemoryRegionSection *section,
3638 bool match_data, uint64_t data, int fd)
3639 {
3640 }
3641
3642 static MemoryListener core_memory_listener = {
3643 .begin = core_begin,
3644 .commit = core_commit,
3645 .region_add = core_region_add,
3646 .region_del = core_region_del,
3647 .region_nop = core_region_nop,
3648 .log_start = core_log_start,
3649 .log_stop = core_log_stop,
3650 .log_sync = core_log_sync,
3651 .log_global_start = core_log_global_start,
3652 .log_global_stop = core_log_global_stop,
3653 .eventfd_add = core_eventfd_add,
3654 .eventfd_del = core_eventfd_del,
3655 .priority = 0,
3656 };
3657
3658 static MemoryListener io_memory_listener = {
3659 .begin = io_begin,
3660 .commit = io_commit,
3661 .region_add = io_region_add,
3662 .region_del = io_region_del,
3663 .region_nop = io_region_nop,
3664 .log_start = io_log_start,
3665 .log_stop = io_log_stop,
3666 .log_sync = io_log_sync,
3667 .log_global_start = io_log_global_start,
3668 .log_global_stop = io_log_global_stop,
3669 .eventfd_add = io_eventfd_add,
3670 .eventfd_del = io_eventfd_del,
3671 .priority = 0,
3672 };
3673
3674 static void memory_map_init(void)
3675 {
3676 system_memory = g_malloc(sizeof(*system_memory));
3677 memory_region_init(system_memory, "system", INT64_MAX);
3678 set_system_memory_map(system_memory);
3679
3680 system_io = g_malloc(sizeof(*system_io));
3681 memory_region_init(system_io, "io", 65536);
3682 set_system_io_map(system_io);
3683
3684 memory_listener_register(&core_memory_listener, system_memory);
3685 memory_listener_register(&io_memory_listener, system_io);
3686 }
3687
3688 MemoryRegion *get_system_memory(void)
3689 {
3690 return system_memory;
3691 }
3692
3693 MemoryRegion *get_system_io(void)
3694 {
3695 return system_io;
3696 }
3697
3698 #endif /* !defined(CONFIG_USER_ONLY) */
3699
3700 /* physical memory access (slow version, mainly for debug) */
3701 #if defined(CONFIG_USER_ONLY)
3702 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3703 uint8_t *buf, int len, int is_write)
3704 {
3705 int l, flags;
3706 target_ulong page;
3707 void * p;
3708
3709 while (len > 0) {
3710 page = addr & TARGET_PAGE_MASK;
3711 l = (page + TARGET_PAGE_SIZE) - addr;
3712 if (l > len)
3713 l = len;
3714 flags = page_get_flags(page);
3715 if (!(flags & PAGE_VALID))
3716 return -1;
3717 if (is_write) {
3718 if (!(flags & PAGE_WRITE))
3719 return -1;
3720 /* XXX: this code should not depend on lock_user */
3721 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3722 return -1;
3723 memcpy(p, buf, l);
3724 unlock_user(p, addr, l);
3725 } else {
3726 if (!(flags & PAGE_READ))
3727 return -1;
3728 /* XXX: this code should not depend on lock_user */
3729 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3730 return -1;
3731 memcpy(buf, p, l);
3732 unlock_user(p, addr, 0);
3733 }
3734 len -= l;
3735 buf += l;
3736 addr += l;
3737 }
3738 return 0;
3739 }
3740
3741 #else
3742 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3743 int len, int is_write)
3744 {
3745 int l, io_index;
3746 uint8_t *ptr;
3747 uint32_t val;
3748 target_phys_addr_t page;
3749 ram_addr_t pd;
3750 PhysPageDesc p;
3751
3752 while (len > 0) {
3753 page = addr & TARGET_PAGE_MASK;
3754 l = (page + TARGET_PAGE_SIZE) - addr;
3755 if (l > len)
3756 l = len;
3757 p = phys_page_find(page >> TARGET_PAGE_BITS);
3758 pd = p.phys_offset;
3759
3760 if (is_write) {
3761 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3762 target_phys_addr_t addr1;
3763 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3764 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3765 /* XXX: could force cpu_single_env to NULL to avoid
3766 potential bugs */
3767 if (l >= 4 && ((addr1 & 3) == 0)) {
3768 /* 32 bit write access */
3769 val = ldl_p(buf);
3770 io_mem_write(io_index, addr1, val, 4);
3771 l = 4;
3772 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3773 /* 16 bit write access */
3774 val = lduw_p(buf);
3775 io_mem_write(io_index, addr1, val, 2);
3776 l = 2;
3777 } else {
3778 /* 8 bit write access */
3779 val = ldub_p(buf);
3780 io_mem_write(io_index, addr1, val, 1);
3781 l = 1;
3782 }
3783 } else {
3784 ram_addr_t addr1;
3785 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3786 /* RAM case */
3787 ptr = qemu_get_ram_ptr(addr1);
3788 memcpy(ptr, buf, l);
3789 if (!cpu_physical_memory_is_dirty(addr1)) {
3790 /* invalidate code */
3791 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3792 /* set dirty bit */
3793 cpu_physical_memory_set_dirty_flags(
3794 addr1, (0xff & ~CODE_DIRTY_FLAG));
3795 }
3796 qemu_put_ram_ptr(ptr);
3797 }
3798 } else {
3799 if (!is_ram_rom_romd(pd)) {
3800 target_phys_addr_t addr1;
3801 /* I/O case */
3802 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3803 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3804 if (l >= 4 && ((addr1 & 3) == 0)) {
3805 /* 32 bit read access */
3806 val = io_mem_read(io_index, addr1, 4);
3807 stl_p(buf, val);
3808 l = 4;
3809 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3810 /* 16 bit read access */
3811 val = io_mem_read(io_index, addr1, 2);
3812 stw_p(buf, val);
3813 l = 2;
3814 } else {
3815 /* 8 bit read access */
3816 val = io_mem_read(io_index, addr1, 1);
3817 stb_p(buf, val);
3818 l = 1;
3819 }
3820 } else {
3821 /* RAM case */
3822 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3823 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3824 qemu_put_ram_ptr(ptr);
3825 }
3826 }
3827 len -= l;
3828 buf += l;
3829 addr += l;
3830 }
3831 }
3832
3833 /* used for ROM loading : can write in RAM and ROM */
3834 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3835 const uint8_t *buf, int len)
3836 {
3837 int l;
3838 uint8_t *ptr;
3839 target_phys_addr_t page;
3840 unsigned long pd;
3841 PhysPageDesc p;
3842
3843 while (len > 0) {
3844 page = addr & TARGET_PAGE_MASK;
3845 l = (page + TARGET_PAGE_SIZE) - addr;
3846 if (l > len)
3847 l = len;
3848 p = phys_page_find(page >> TARGET_PAGE_BITS);
3849 pd = p.phys_offset;
3850
3851 if (!is_ram_rom_romd(pd)) {
3852 /* do nothing */
3853 } else {
3854 unsigned long addr1;
3855 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3856 /* ROM/RAM case */
3857 ptr = qemu_get_ram_ptr(addr1);
3858 memcpy(ptr, buf, l);
3859 qemu_put_ram_ptr(ptr);
3860 }
3861 len -= l;
3862 buf += l;
3863 addr += l;
3864 }
3865 }
3866
3867 typedef struct {
3868 void *buffer;
3869 target_phys_addr_t addr;
3870 target_phys_addr_t len;
3871 } BounceBuffer;
3872
3873 static BounceBuffer bounce;
3874
3875 typedef struct MapClient {
3876 void *opaque;
3877 void (*callback)(void *opaque);
3878 QLIST_ENTRY(MapClient) link;
3879 } MapClient;
3880
3881 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3882 = QLIST_HEAD_INITIALIZER(map_client_list);
3883
3884 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3885 {
3886 MapClient *client = g_malloc(sizeof(*client));
3887
3888 client->opaque = opaque;
3889 client->callback = callback;
3890 QLIST_INSERT_HEAD(&map_client_list, client, link);
3891 return client;
3892 }
3893
3894 void cpu_unregister_map_client(void *_client)
3895 {
3896 MapClient *client = (MapClient *)_client;
3897
3898 QLIST_REMOVE(client, link);
3899 g_free(client);
3900 }
3901
3902 static void cpu_notify_map_clients(void)
3903 {
3904 MapClient *client;
3905
3906 while (!QLIST_EMPTY(&map_client_list)) {
3907 client = QLIST_FIRST(&map_client_list);
3908 client->callback(client->opaque);
3909 cpu_unregister_map_client(client);
3910 }
3911 }
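
/* Usage sketch (illustrative): a caller whose cpu_physical_memory_map()
   attempt failed can ask to be notified when bounce buffer resources are
   released again (my_dma_retry is a hypothetical helper):

       static void my_dma_retry(void *opaque)
       {
           ... re-issue the cpu_physical_memory_map() call ...
       }

       client = cpu_register_map_client(dev, my_dma_retry);

   The client is unregistered automatically once its callback has fired. */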
3912
3913 /* Map a physical memory region into a host virtual address.
3914 * May map a subset of the requested range, given by and returned in *plen.
3915 * May return NULL if resources needed to perform the mapping are exhausted.
3916 * Use only for reads OR writes - not for read-modify-write operations.
3917 * Use cpu_register_map_client() to know when retrying the map operation is
3918 * likely to succeed.
3919 */
3920 void *cpu_physical_memory_map(target_phys_addr_t addr,
3921 target_phys_addr_t *plen,
3922 int is_write)
3923 {
3924 target_phys_addr_t len = *plen;
3925 target_phys_addr_t todo = 0;
3926 int l;
3927 target_phys_addr_t page;
3928 unsigned long pd;
3929 PhysPageDesc p;
3930 ram_addr_t raddr = RAM_ADDR_MAX;
3931 ram_addr_t rlen;
3932 void *ret;
3933
3934 while (len > 0) {
3935 page = addr & TARGET_PAGE_MASK;
3936 l = (page + TARGET_PAGE_SIZE) - addr;
3937 if (l > len)
3938 l = len;
3939 p = phys_page_find(page >> TARGET_PAGE_BITS);
3940 pd = p.phys_offset;
3941
3942 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3943 if (todo || bounce.buffer) {
3944 break;
3945 }
3946 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3947 bounce.addr = addr;
3948 bounce.len = l;
3949 if (!is_write) {
3950 cpu_physical_memory_read(addr, bounce.buffer, l);
3951 }
3952
3953 *plen = l;
3954 return bounce.buffer;
3955 }
3956 if (!todo) {
3957 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3958 }
3959
3960 len -= l;
3961 addr += l;
3962 todo += l;
3963 }
3964 rlen = todo;
3965 ret = qemu_ram_ptr_length(raddr, &rlen);
3966 *plen = rlen;
3967 return ret;
3968 }
3969
3970 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3971 * Will also mark the memory as dirty if is_write == 1. access_len gives
3972 * the amount of memory that was actually read or written by the caller.
3973 */
3974 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3975 int is_write, target_phys_addr_t access_len)
3976 {
3977 if (buffer != bounce.buffer) {
3978 if (is_write) {
3979 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3980 while (access_len) {
3981 unsigned l;
3982 l = TARGET_PAGE_SIZE;
3983 if (l > access_len)
3984 l = access_len;
3985 if (!cpu_physical_memory_is_dirty(addr1)) {
3986 /* invalidate code */
3987 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3988 /* set dirty bit */
3989 cpu_physical_memory_set_dirty_flags(
3990 addr1, (0xff & ~CODE_DIRTY_FLAG));
3991 }
3992 addr1 += l;
3993 access_len -= l;
3994 }
3995 }
3996 if (xen_enabled()) {
3997 xen_invalidate_map_cache_entry(buffer);
3998 }
3999 return;
4000 }
4001 if (is_write) {
4002 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4003 }
4004 qemu_vfree(bounce.buffer);
4005 bounce.buffer = NULL;
4006 cpu_notify_map_clients();
4007 }
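
/* Usage sketch (hypothetical): the write direction of the same cycle.  The
   access_len passed to cpu_physical_memory_unmap() should be the number of
   bytes actually written, since that is the range that gets its dirty bits
   set and its translated code invalidated.  mydev_produce() is made up. */
#if 0
static void mydev_map_and_fill(target_phys_addr_t pa, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(pa, &plen, 1 /* write */);
    target_phys_addr_t written;

    if (!host) {
        return;
    }
    written = mydev_produce(host, plen);    /* bytes actually filled in */
    cpu_physical_memory_unmap(host, plen, 1, written);
}
#endif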
4008
4009 /* warning: addr must be aligned */
4010 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4011 enum device_endian endian)
4012 {
4013 int io_index;
4014 uint8_t *ptr;
4015 uint32_t val;
4016 unsigned long pd;
4017 PhysPageDesc p;
4018
4019 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4020 pd = p.phys_offset;
4021
4022 if (!is_ram_rom_romd(pd)) {
4023 /* I/O case */
4024 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4025 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4026 val = io_mem_read(io_index, addr, 4);
4027 #if defined(TARGET_WORDS_BIGENDIAN)
4028 if (endian == DEVICE_LITTLE_ENDIAN) {
4029 val = bswap32(val);
4030 }
4031 #else
4032 if (endian == DEVICE_BIG_ENDIAN) {
4033 val = bswap32(val);
4034 }
4035 #endif
4036 } else {
4037 /* RAM case */
4038 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4039 (addr & ~TARGET_PAGE_MASK);
4040 switch (endian) {
4041 case DEVICE_LITTLE_ENDIAN:
4042 val = ldl_le_p(ptr);
4043 break;
4044 case DEVICE_BIG_ENDIAN:
4045 val = ldl_be_p(ptr);
4046 break;
4047 default:
4048 val = ldl_p(ptr);
4049 break;
4050 }
4051 }
4052 return val;
4053 }
4054
4055 uint32_t ldl_phys(target_phys_addr_t addr)
4056 {
4057 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4058 }
4059
4060 uint32_t ldl_le_phys(target_phys_addr_t addr)
4061 {
4062 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4063 }
4064
4065 uint32_t ldl_be_phys(target_phys_addr_t addr)
4066 {
4067 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4068 }
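
/* Usage sketch (hypothetical): device code reading a 32-bit field that the
   guest keeps in little-endian format regardless of host or target byte
   order.  The descriptor layout (a length word at offset 8) is made up;
   ldl_le_phys() is the call being illustrated. */
#if 0
static uint32_t mydev_read_desc_len(target_phys_addr_t desc_pa)
{
    return ldl_le_phys(desc_pa + 8);
}
#endif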
4069
4070 /* warning: addr must be aligned */
4071 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4072 enum device_endian endian)
4073 {
4074 int io_index;
4075 uint8_t *ptr;
4076 uint64_t val;
4077 unsigned long pd;
4078 PhysPageDesc p;
4079
4080 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4081 pd = p.phys_offset;
4082
4083 if (!is_ram_rom_romd(pd)) {
4084 /* I/O case */
4085 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4086 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4087
4088 /* XXX This is broken when device endian != cpu endian.
4089 Fix and add "endian" variable check */
4090 #ifdef TARGET_WORDS_BIGENDIAN
4091 val = io_mem_read(io_index, addr, 4) << 32;
4092 val |= io_mem_read(io_index, addr + 4, 4);
4093 #else
4094 val = io_mem_read(io_index, addr, 4);
4095 val |= io_mem_read(io_index, addr + 4, 4) << 32;
4096 #endif
4097 } else {
4098 /* RAM case */
4099 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4100 (addr & ~TARGET_PAGE_MASK);
4101 switch (endian) {
4102 case DEVICE_LITTLE_ENDIAN:
4103 val = ldq_le_p(ptr);
4104 break;
4105 case DEVICE_BIG_ENDIAN:
4106 val = ldq_be_p(ptr);
4107 break;
4108 default:
4109 val = ldq_p(ptr);
4110 break;
4111 }
4112 }
4113 return val;
4114 }
4115
4116 uint64_t ldq_phys(target_phys_addr_t addr)
4117 {
4118 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4119 }
4120
4121 uint64_t ldq_le_phys(target_phys_addr_t addr)
4122 {
4123 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4124 }
4125
4126 uint64_t ldq_be_phys(target_phys_addr_t addr)
4127 {
4128 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4129 }
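
/* Usage sketch (hypothetical): the 64-bit counterpart, e.g. fetching a
   little-endian guest-physical pointer from an in-memory ring descriptor.
   The descriptor layout is made up; ldq_le_phys() is the call shown. */
#if 0
static uint64_t mydev_read_desc_addr(target_phys_addr_t desc_pa)
{
    return ldq_le_phys(desc_pa);
}
#endif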
4130
4131 /* XXX: optimize */
4132 uint32_t ldub_phys(target_phys_addr_t addr)
4133 {
4134 uint8_t val;
4135 cpu_physical_memory_read(addr, &val, 1);
4136 return val;
4137 }
4138
4139 /* warning: addr must be aligned */
4140 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4141 enum device_endian endian)
4142 {
4143 int io_index;
4144 uint8_t *ptr;
4145 uint32_t val;
4146 unsigned long pd;
4147 PhysPageDesc p;
4148
4149 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4150 pd = p.phys_offset;
4151
4152 if (!is_ram_rom_romd(pd)) {
4153 /* I/O case */
4154 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4155 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4156 val = io_mem_read(io_index, addr, 2);
4157 #if defined(TARGET_WORDS_BIGENDIAN)
4158 if (endian == DEVICE_LITTLE_ENDIAN) {
4159 val = bswap16(val);
4160 }
4161 #else
4162 if (endian == DEVICE_BIG_ENDIAN) {
4163 val = bswap16(val);
4164 }
4165 #endif
4166 } else {
4167 /* RAM case */
4168 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4169 (addr & ~TARGET_PAGE_MASK);
4170 switch (endian) {
4171 case DEVICE_LITTLE_ENDIAN:
4172 val = lduw_le_p(ptr);
4173 break;
4174 case DEVICE_BIG_ENDIAN:
4175 val = lduw_be_p(ptr);
4176 break;
4177 default:
4178 val = lduw_p(ptr);
4179 break;
4180 }
4181 }
4182 return val;
4183 }
4184
4185 uint32_t lduw_phys(target_phys_addr_t addr)
4186 {
4187 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4188 }
4189
4190 uint32_t lduw_le_phys(target_phys_addr_t addr)
4191 {
4192 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4193 }
4194
4195 uint32_t lduw_be_phys(target_phys_addr_t addr)
4196 {
4197 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4198 }
4199
4200 /* warning: addr must be aligned. The ram page is not marked as dirty
4201 and the code inside is not invalidated. It is useful if the dirty
4202 bits are used to track modified PTEs */
4203 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4204 {
4205 int io_index;
4206 uint8_t *ptr;
4207 unsigned long pd;
4208 PhysPageDesc p;
4209
4210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4211 pd = p.phys_offset;
4212
4213 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4214 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4215 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4216 io_mem_write(io_index, addr, val, 4);
4217 } else {
4218 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4219 ptr = qemu_get_ram_ptr(addr1);
4220 stl_p(ptr, val);
4221
4222 if (unlikely(in_migration)) {
4223 if (!cpu_physical_memory_is_dirty(addr1)) {
4224 /* invalidate code */
4225 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4226 /* set dirty bit */
4227 cpu_physical_memory_set_dirty_flags(
4228 addr1, (0xff & ~CODE_DIRTY_FLAG));
4229 }
4230 }
4231 }
4232 }
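
/* Usage sketch (hypothetical): a target MMU helper setting the accessed bit
   in a guest PTE.  Using the _notdirty variant keeps the store from marking
   the page-table page dirty for TB invalidation purposes (migration dirty
   tracking is still honoured, as the code above shows).  The 32-bit PTE
   layout and the PTE_ACCESSED value are made up. */
#if 0
#define PTE_ACCESSED 0x20               /* imaginary accessed bit */

static void mark_pte_accessed(target_phys_addr_t pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_pa, pte | PTE_ACCESSED);
    }
}
#endif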
4233
4234 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4235 {
4236 int io_index;
4237 uint8_t *ptr;
4238 unsigned long pd;
4239 PhysPageDesc p;
4240
4241 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4242 pd = p.phys_offset;
4243
4244 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4245 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4246 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4247 #ifdef TARGET_WORDS_BIGENDIAN
4248 io_mem_write(io_index, addr, val >> 32, 4);
4249 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4250 #else
4251 io_mem_write(io_index, addr, (uint32_t)val, 4);
4252 io_mem_write(io_index, addr + 4, val >> 32, 4);
4253 #endif
4254 } else {
4255 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4256 (addr & ~TARGET_PAGE_MASK);
4257 stq_p(ptr, val);
4258 }
4259 }
4260
4261 /* warning: addr must be aligned */
4262 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4263 enum device_endian endian)
4264 {
4265 int io_index;
4266 uint8_t *ptr;
4267 unsigned long pd;
4268 PhysPageDesc p;
4269
4270 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4271 pd = p.phys_offset;
4272
4273 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4274 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4275 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4276 #if defined(TARGET_WORDS_BIGENDIAN)
4277 if (endian == DEVICE_LITTLE_ENDIAN) {
4278 val = bswap32(val);
4279 }
4280 #else
4281 if (endian == DEVICE_BIG_ENDIAN) {
4282 val = bswap32(val);
4283 }
4284 #endif
4285 io_mem_write(io_index, addr, val, 4);
4286 } else {
4287 unsigned long addr1;
4288 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4289 /* RAM case */
4290 ptr = qemu_get_ram_ptr(addr1);
4291 switch (endian) {
4292 case DEVICE_LITTLE_ENDIAN:
4293 stl_le_p(ptr, val);
4294 break;
4295 case DEVICE_BIG_ENDIAN:
4296 stl_be_p(ptr, val);
4297 break;
4298 default:
4299 stl_p(ptr, val);
4300 break;
4301 }
4302 if (!cpu_physical_memory_is_dirty(addr1)) {
4303 /* invalidate code */
4304 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4305 /* set dirty bit */
4306 cpu_physical_memory_set_dirty_flags(addr1,
4307 (0xff & ~CODE_DIRTY_FLAG));
4308 }
4309 }
4310 }
4311
4312 void stl_phys(target_phys_addr_t addr, uint32_t val)
4313 {
4314 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4315 }
4316
4317 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4318 {
4319 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4320 }
4321
4322 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4323 {
4324 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4325 }
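
/* Usage sketch (hypothetical): a device writing a little-endian status word
   back to guest memory.  Unlike the _notdirty variant above, these stores
   mark the page dirty and invalidate any translated code that overlaps it.
   The completion-record layout is made up; stl_le_phys() is the call shown. */
#if 0
static void mydev_post_status(target_phys_addr_t status_pa, uint32_t status)
{
    stl_le_phys(status_pa, status);
}
#endif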
4326
4327 /* XXX: optimize */
4328 void stb_phys(target_phys_addr_t addr, uint32_t val)
4329 {
4330 uint8_t v = val;
4331 cpu_physical_memory_write(addr, &v, 1);
4332 }
4333
4334 /* warning: addr must be aligned */
4335 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4336 enum device_endian endian)
4337 {
4338 int io_index;
4339 uint8_t *ptr;
4340 unsigned long pd;
4341 PhysPageDesc p;
4342
4343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4344 pd = p.phys_offset;
4345
4346 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4347 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4348 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4349 #if defined(TARGET_WORDS_BIGENDIAN)
4350 if (endian == DEVICE_LITTLE_ENDIAN) {
4351 val = bswap16(val);
4352 }
4353 #else
4354 if (endian == DEVICE_BIG_ENDIAN) {
4355 val = bswap16(val);
4356 }
4357 #endif
4358 io_mem_write(io_index, addr, val, 2);
4359 } else {
4360 unsigned long addr1;
4361 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4362 /* RAM case */
4363 ptr = qemu_get_ram_ptr(addr1);
4364 switch (endian) {
4365 case DEVICE_LITTLE_ENDIAN:
4366 stw_le_p(ptr, val);
4367 break;
4368 case DEVICE_BIG_ENDIAN:
4369 stw_be_p(ptr, val);
4370 break;
4371 default:
4372 stw_p(ptr, val);
4373 break;
4374 }
4375 if (!cpu_physical_memory_is_dirty(addr1)) {
4376 /* invalidate code */
4377 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4378 /* set dirty bit */
4379 cpu_physical_memory_set_dirty_flags(addr1,
4380 (0xff & ~CODE_DIRTY_FLAG));
4381 }
4382 }
4383 }
4384
4385 void stw_phys(target_phys_addr_t addr, uint32_t val)
4386 {
4387 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4388 }
4389
4390 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4391 {
4392 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4393 }
4394
4395 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4396 {
4397 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4398 }
4399
4400 /* XXX: optimize */
4401 void stq_phys(target_phys_addr_t addr, uint64_t val)
4402 {
4403 val = tswap64(val);
4404 cpu_physical_memory_write(addr, &val, 8);
4405 }
4406
4407 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4408 {
4409 val = cpu_to_le64(val);
4410 cpu_physical_memory_write(addr, &val, 8);
4411 }
4412
4413 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4414 {
4415 val = cpu_to_be64(val);
4416 cpu_physical_memory_write(addr, &val, 8);
4417 }
4418
4419 /* virtual memory access for debug (includes writing to ROM) */
4420 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4421 uint8_t *buf, int len, int is_write)
4422 {
4423 int l;
4424 target_phys_addr_t phys_addr;
4425 target_ulong page;
4426
4427 while (len > 0) {
4428 page = addr & TARGET_PAGE_MASK;
4429 phys_addr = cpu_get_phys_page_debug(env, page);
4430 /* if no physical page mapped, return an error */
4431 if (phys_addr == -1)
4432 return -1;
4433 l = (page + TARGET_PAGE_SIZE) - addr;
4434 if (l > len)
4435 l = len;
4436 phys_addr += (addr & ~TARGET_PAGE_MASK);
4437 if (is_write)
4438 cpu_physical_memory_write_rom(phys_addr, buf, l);
4439 else
4440 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4441 len -= l;
4442 buf += l;
4443 addr += l;
4444 }
4445 return 0;
4446 }
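
/* Usage sketch (hypothetical): how a debugger stub might peek at a 32-bit
   value in guest virtual memory, tolerating unmapped pages.  The helper name
   is made up; cpu_memory_rw_debug() and ldl_p() are the calls being shown. */
#if 0
static bool debug_peek_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;                   /* no physical page mapped */
    }
    *out = ldl_p(buf);                  /* interpret in target byte order */
    return true;
}
#endif
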
4447 #endif
4448
4449 /* in deterministic execution mode, instructions doing device I/Os
4450 must be at the end of the TB */
4451 void cpu_io_recompile(CPUState *env, void *retaddr)
4452 {
4453 TranslationBlock *tb;
4454 uint32_t n, cflags;
4455 target_ulong pc, cs_base;
4456 uint64_t flags;
4457
4458 tb = tb_find_pc((unsigned long)retaddr);
4459 if (!tb) {
4460 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4461 retaddr);
4462 }
4463 n = env->icount_decr.u16.low + tb->icount;
4464 cpu_restore_state(tb, env, (unsigned long)retaddr);
4465 /* Calculate how many instructions had been executed before the fault
4466 occurred. */
4467 n = n - env->icount_decr.u16.low;
4468 /* Generate a new TB ending on the I/O insn. */
4469 n++;
4470 /* On MIPS and SH, delay slot instructions can only be restarted if
4471 they were already the first instruction in the TB. If this is not
4472 the first instruction in a TB then re-execute the preceding
4473 branch. */
4474 #if defined(TARGET_MIPS)
4475 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4476 env->active_tc.PC -= 4;
4477 env->icount_decr.u16.low++;
4478 env->hflags &= ~MIPS_HFLAG_BMASK;
4479 }
4480 #elif defined(TARGET_SH4)
4481 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4482 && n > 1) {
4483 env->pc -= 2;
4484 env->icount_decr.u16.low++;
4485 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4486 }
4487 #endif
4488 /* This should never happen. */
4489 if (n > CF_COUNT_MASK)
4490 cpu_abort(env, "TB too big during recompile");
4491
4492 cflags = n | CF_LAST_IO;
4493 pc = tb->pc;
4494 cs_base = tb->cs_base;
4495 flags = tb->flags;
4496 tb_phys_invalidate(tb, -1);
4497 /* FIXME: In theory this could raise an exception. In practice
4498 we have already translated the block once so it's probably ok. */
4499 tb_gen_code(env, pc, cs_base, flags, cflags);
4500 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4501 the first in the TB) then we end up generating a whole new TB and
4502 repeating the fault, which is horribly inefficient.
4503 Better would be to execute just this insn uncached, or generate a
4504 second new TB. */
4505 cpu_resume_from_signal(env, NULL);
4506 }
4507
4508 #if !defined(CONFIG_USER_ONLY)
4509
4510 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4511 {
4512 int i, target_code_size, max_target_code_size;
4513 int direct_jmp_count, direct_jmp2_count, cross_page;
4514 TranslationBlock *tb;
4515
4516 target_code_size = 0;
4517 max_target_code_size = 0;
4518 cross_page = 0;
4519 direct_jmp_count = 0;
4520 direct_jmp2_count = 0;
4521 for(i = 0; i < nb_tbs; i++) {
4522 tb = &tbs[i];
4523 target_code_size += tb->size;
4524 if (tb->size > max_target_code_size)
4525 max_target_code_size = tb->size;
4526 if (tb->page_addr[1] != -1)
4527 cross_page++;
4528 if (tb->tb_next_offset[0] != 0xffff) {
4529 direct_jmp_count++;
4530 if (tb->tb_next_offset[1] != 0xffff) {
4531 direct_jmp2_count++;
4532 }
4533 }
4534 }
4535 /* XXX: avoid using doubles ? */
4536 cpu_fprintf(f, "Translation buffer state:\n");
4537 cpu_fprintf(f, "gen code size %td/%ld\n",
4538 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4539 cpu_fprintf(f, "TB count %d/%d\n",
4540 nb_tbs, code_gen_max_blocks);
4541 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4542 nb_tbs ? target_code_size / nb_tbs : 0,
4543 max_target_code_size);
4544 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4545 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4546 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4547 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4548 cross_page,
4549 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4550 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4551 direct_jmp_count,
4552 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4553 direct_jmp2_count,
4554 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4555 cpu_fprintf(f, "\nStatistics:\n");
4556 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4557 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4558 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4559 tcg_dump_info(f, cpu_fprintf);
4560 }
4561
4562 /* NOTE: this function can trigger an exception */
4563 /* NOTE2: the returned address is not exactly the physical address: it
4564 is the offset relative to phys_ram_base */
4565 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4566 {
4567 int mmu_idx, page_index, pd;
4568 void *p;
4569
4570 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4571 mmu_idx = cpu_mmu_index(env1);
4572 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4573 (addr & TARGET_PAGE_MASK))) {
4574 ldub_code(addr);
4575 }
4576 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4577 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4578 && !is_romd(pd)) {
4579 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4580 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4581 #else
4582 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4583 #endif
4584 }
4585 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4586 return qemu_ram_addr_from_host_nofail(p);
4587 }
4588
4589 /*
4590 * A helper function for the _utterly broken_ virtio device model to find out if
4591 * it's running on a big endian machine. Don't do this at home kids!
4592 */
4593 bool virtio_is_big_endian(void);
4594 bool virtio_is_big_endian(void)
4595 {
4596 #if defined(TARGET_WORDS_BIGENDIAN)
4597 return true;
4598 #else
4599 return false;
4600 #endif
4601 }
4602
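/* Instantiate the code-access variants of the softmmu load helpers (the
   "_cmmu" accessors used when fetching guest instructions rather than data):
   each SHIFT value below re-includes softmmu_template.h for a different
   access size (1, 2, 4 and 8 bytes). */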
4603 #define MMUSUFFIX _cmmu
4604 #undef GETPC
4605 #define GETPC() NULL
4606 #define env cpu_single_env
4607 #define SOFTMMU_CODE_ACCESS
4608
4609 #define SHIFT 0
4610 #include "softmmu_template.h"
4611
4612 #define SHIFT 1
4613 #include "softmmu_template.h"
4614
4615 #define SHIFT 2
4616 #include "softmmu_template.h"
4617
4618 #define SHIFT 3
4619 #include "softmmu_template.h"
4620
4621 #undef env
4622
4623 #endif