1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
59
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
62
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
67
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
71
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
74
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
79
80 #define SMC_BITMAP_USE_THRESHOLD 10
81
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC), so place it in a
92 section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
115
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
120
121 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 static MemoryRegion io_mem_subpage_ram;
123
124 #endif
125
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 DEFINE_TLS(CPUState *,cpu_single_env);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138 /* in order to optimize self-modifying code handling, we count the number
139 of code write accesses to a given page and switch to a bitmap once a threshold is reached */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144 #endif
145 } PageDesc;
146
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 #else
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
154 #endif
155 #else
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
157 #endif
158
159 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_BITS 10
161 #define L2_SIZE (1 << L2_BITS)
162
163 #define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165
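/* Worked example (illustrative values, not taken from this file's build
   configuration): with TARGET_PHYS_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12
   and L2_BITS = 10, P_L2_LEVELS = ((36 - 12 - 1) / 10) + 1 = 3, i.e. the
   physical map rooted at phys_map below is a three-level radix tree whose
   leaves are arrays of L2_SIZE PhysPageDesc entries. */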
166 /* The bits remaining after N lower levels of page tables. */
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169
170 #if V_L1_BITS_REM < 4
171 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172 #else
173 #define V_L1_BITS V_L1_BITS_REM
174 #endif
175
176 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179
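/* Worked example (illustrative, assuming a 64-bit host and 4 KiB target
   pages, i.e. L1_MAP_ADDR_SPACE_BITS = 64 and TARGET_PAGE_BITS = 12):
   64 - 12 = 52 index bits remain; V_L1_BITS_REM = 52 % 10 = 2, which is
   below 4, so V_L1_BITS = 2 + 10 = 12, V_L1_SIZE = 4096 entries and
   V_L1_SHIFT = 64 - 12 - 12 = 40.  page_find_alloc() then walks
   V_L1_SHIFT / L2_BITS - 1 = 3 intermediate levels below l1_map before
   reaching the leaf array of PageDesc entries. */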
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_size;
182 unsigned long qemu_host_page_mask;
183
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map[V_L1_SIZE];
187
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193 } PhysPageDesc;
194
195 /* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197 static void *phys_map;
198
199 static void io_mem_init(void);
200 static void memory_map_init(void);
201
202 /* io memory support */
203 MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
204 static char io_mem_used[IO_MEM_NB_ENTRIES];
205 static MemoryRegion io_mem_watch;
206 #endif
207
208 /* log support */
209 #ifdef WIN32
210 static const char *logfilename = "qemu.log";
211 #else
212 static const char *logfilename = "/tmp/qemu.log";
213 #endif
214 FILE *logfile;
215 int loglevel;
216 static int log_append = 0;
217
218 /* statistics */
219 #if !defined(CONFIG_USER_ONLY)
220 static int tlb_flush_count;
221 #endif
222 static int tb_flush_count;
223 static int tb_phys_invalidate_count;
224
225 #ifdef _WIN32
226 static void map_exec(void *addr, long size)
227 {
228 DWORD old_protect;
229 VirtualProtect(addr, size,
230 PAGE_EXECUTE_READWRITE, &old_protect);
231
232 }
233 #else
234 static void map_exec(void *addr, long size)
235 {
236 unsigned long start, end, page_size;
237
238 page_size = getpagesize();
239 start = (unsigned long)addr;
240 start &= ~(page_size - 1);
241
242 end = (unsigned long)addr + size;
243 end += page_size - 1;
244 end &= ~(page_size - 1);
245
246 mprotect((void *)start, end - start,
247 PROT_READ | PROT_WRITE | PROT_EXEC);
248 }
249 #endif
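/* Example of the page rounding in the POSIX variant above (hypothetical
   numbers): with a 4 KiB host page size, map_exec((void *)0x1234, 0x100)
   rounds start down to 0x1000 and end up to 0x2000, so exactly one whole
   page is remapped read/write/execute. */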
250
251 static void page_init(void)
252 {
253 /* NOTE: we can always suppose that qemu_host_page_size >=
254 TARGET_PAGE_SIZE */
255 #ifdef _WIN32
256 {
257 SYSTEM_INFO system_info;
258
259 GetSystemInfo(&system_info);
260 qemu_real_host_page_size = system_info.dwPageSize;
261 }
262 #else
263 qemu_real_host_page_size = getpagesize();
264 #endif
265 if (qemu_host_page_size == 0)
266 qemu_host_page_size = qemu_real_host_page_size;
267 if (qemu_host_page_size < TARGET_PAGE_SIZE)
268 qemu_host_page_size = TARGET_PAGE_SIZE;
269 qemu_host_page_mask = ~(qemu_host_page_size - 1);
270
271 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
272 {
273 #ifdef HAVE_KINFO_GETVMMAP
274 struct kinfo_vmentry *freep;
275 int i, cnt;
276
277 freep = kinfo_getvmmap(getpid(), &cnt);
278 if (freep) {
279 mmap_lock();
280 for (i = 0; i < cnt; i++) {
281 unsigned long startaddr, endaddr;
282
283 startaddr = freep[i].kve_start;
284 endaddr = freep[i].kve_end;
285 if (h2g_valid(startaddr)) {
286 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
287
288 if (h2g_valid(endaddr)) {
289 endaddr = h2g(endaddr);
290 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
291 } else {
292 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
293 endaddr = ~0ul;
294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
295 #endif
296 }
297 }
298 }
299 free(freep);
300 mmap_unlock();
301 }
302 #else
303 FILE *f;
304
305 last_brk = (unsigned long)sbrk(0);
306
307 f = fopen("/compat/linux/proc/self/maps", "r");
308 if (f) {
309 mmap_lock();
310
311 do {
312 unsigned long startaddr, endaddr;
313 int n;
314
315 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
316
317 if (n == 2 && h2g_valid(startaddr)) {
318 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
319
320 if (h2g_valid(endaddr)) {
321 endaddr = h2g(endaddr);
322 } else {
323 endaddr = ~0ul;
324 }
325 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
326 }
327 } while (!feof(f));
328
329 fclose(f);
330 mmap_unlock();
331 }
332 #endif
333 }
334 #endif
335 }
336
337 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
338 {
339 PageDesc *pd;
340 void **lp;
341 int i;
342
343 #if defined(CONFIG_USER_ONLY)
344 /* We can't use g_malloc because it may recurse into a locked mutex. */
345 # define ALLOC(P, SIZE) \
346 do { \
347 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
348 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
349 } while (0)
350 #else
351 # define ALLOC(P, SIZE) \
352 do { P = g_malloc0(SIZE); } while (0)
353 #endif
354
355 /* Level 1. Always allocated. */
356 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
357
358 /* Level 2..N-1. */
359 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
360 void **p = *lp;
361
362 if (p == NULL) {
363 if (!alloc) {
364 return NULL;
365 }
366 ALLOC(p, sizeof(void *) * L2_SIZE);
367 *lp = p;
368 }
369
370 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
371 }
372
373 pd = *lp;
374 if (pd == NULL) {
375 if (!alloc) {
376 return NULL;
377 }
378 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
379 *lp = pd;
380 }
381
382 #undef ALLOC
383
384 return pd + (index & (L2_SIZE - 1));
385 }
386
387 static inline PageDesc *page_find(tb_page_addr_t index)
388 {
389 return page_find_alloc(index, 0);
390 }
391
392 #if !defined(CONFIG_USER_ONLY)
393 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
394 {
395 PhysPageDesc *pd;
396 void **lp;
397 int i;
398
399 lp = &phys_map;
400
401 /* Level 1..N-1. */
402 for (i = P_L2_LEVELS - 1; i > 0; i--) {
403 void **p = *lp;
404 if (p == NULL) {
405 if (!alloc) {
406 return NULL;
407 }
408 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
409 }
410 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
411 }
412
413 pd = *lp;
414 if (pd == NULL) {
415 int i;
416 int first_index = index & ~(L2_SIZE - 1);
417
418 if (!alloc) {
419 return NULL;
420 }
421
422 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
423
424 for (i = 0; i < L2_SIZE; i++) {
425 pd[i].phys_offset = io_mem_unassigned.ram_addr;
426 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
427 }
428 }
429
430 return pd + (index & (L2_SIZE - 1));
431 }
432
433 static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
434 {
435 PhysPageDesc *p = phys_page_find_alloc(index, 0);
436
437 if (p) {
438 return *p;
439 } else {
440 return (PhysPageDesc) {
441 .phys_offset = io_mem_unassigned.ram_addr,
442 .region_offset = index << TARGET_PAGE_BITS,
443 };
444 }
445 }
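/* Typical use (see e.g. breakpoint_invalidate() further below): callers pass
   a page index, i.e. phys_page_find(addr >> TARGET_PAGE_BITS), and receive a
   PhysPageDesc by value; unmapped pages come back with phys_offset set to
   io_mem_unassigned.ram_addr rather than as a NULL pointer. */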
446
447 static void tlb_protect_code(ram_addr_t ram_addr);
448 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
449 target_ulong vaddr);
450 #define mmap_lock() do { } while(0)
451 #define mmap_unlock() do { } while(0)
452 #endif
453
454 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
455
456 #if defined(CONFIG_USER_ONLY)
457 /* Currently it is not recommended to allocate big chunks of data in
458 user mode. This will change when a dedicated libc is used. */
459 #define USE_STATIC_CODE_GEN_BUFFER
460 #endif
461
462 #ifdef USE_STATIC_CODE_GEN_BUFFER
463 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
464 __attribute__((aligned (CODE_GEN_ALIGN)));
465 #endif
466
467 static void code_gen_alloc(unsigned long tb_size)
468 {
469 #ifdef USE_STATIC_CODE_GEN_BUFFER
470 code_gen_buffer = static_code_gen_buffer;
471 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472 map_exec(code_gen_buffer, code_gen_buffer_size);
473 #else
474 code_gen_buffer_size = tb_size;
475 if (code_gen_buffer_size == 0) {
476 #if defined(CONFIG_USER_ONLY)
477 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
478 #else
479 /* XXX: needs adjustments */
480 code_gen_buffer_size = (unsigned long)(ram_size / 4);
481 #endif
482 }
483 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
484 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
485 /* The code gen buffer location may have constraints depending on
486 the host cpu and OS */
487 #if defined(__linux__)
488 {
489 int flags;
490 void *start = NULL;
491
492 flags = MAP_PRIVATE | MAP_ANONYMOUS;
493 #if defined(__x86_64__)
494 flags |= MAP_32BIT;
495 /* Cannot map more than that */
496 if (code_gen_buffer_size > (800 * 1024 * 1024))
497 code_gen_buffer_size = (800 * 1024 * 1024);
498 #elif defined(__sparc_v9__)
499 // Map the buffer below 2G, so we can use direct calls and branches
500 flags |= MAP_FIXED;
501 start = (void *) 0x60000000UL;
502 if (code_gen_buffer_size > (512 * 1024 * 1024))
503 code_gen_buffer_size = (512 * 1024 * 1024);
504 #elif defined(__arm__)
505 /* Keep the buffer no bigger than 16MB so that direct branches between blocks stay in range */
506 if (code_gen_buffer_size > 16 * 1024 * 1024)
507 code_gen_buffer_size = 16 * 1024 * 1024;
508 #elif defined(__s390x__)
509 /* Map the buffer so that we can use direct calls and branches. */
510 /* We have a +- 4GB range on the branches; leave some slop. */
511 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
512 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
513 }
514 start = (void *)0x90000000UL;
515 #endif
516 code_gen_buffer = mmap(start, code_gen_buffer_size,
517 PROT_WRITE | PROT_READ | PROT_EXEC,
518 flags, -1, 0);
519 if (code_gen_buffer == MAP_FAILED) {
520 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
521 exit(1);
522 }
523 }
524 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
525 || defined(__DragonFly__) || defined(__OpenBSD__) \
526 || defined(__NetBSD__)
527 {
528 int flags;
529 void *addr = NULL;
530 flags = MAP_PRIVATE | MAP_ANONYMOUS;
531 #if defined(__x86_64__)
532 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
533 * 0x40000000 is free */
534 flags |= MAP_FIXED;
535 addr = (void *)0x40000000;
536 /* Cannot map more than that */
537 if (code_gen_buffer_size > (800 * 1024 * 1024))
538 code_gen_buffer_size = (800 * 1024 * 1024);
539 #elif defined(__sparc_v9__)
540 // Map the buffer below 2G, so we can use direct calls and branches
541 flags |= MAP_FIXED;
542 addr = (void *) 0x60000000UL;
543 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
544 code_gen_buffer_size = (512 * 1024 * 1024);
545 }
546 #endif
547 code_gen_buffer = mmap(addr, code_gen_buffer_size,
548 PROT_WRITE | PROT_READ | PROT_EXEC,
549 flags, -1, 0);
550 if (code_gen_buffer == MAP_FAILED) {
551 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
552 exit(1);
553 }
554 }
555 #else
556 code_gen_buffer = g_malloc(code_gen_buffer_size);
557 map_exec(code_gen_buffer, code_gen_buffer_size);
558 #endif
559 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
560 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
561 code_gen_buffer_max_size = code_gen_buffer_size -
562 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
563 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
564 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
565 }
566
567 /* Must be called before using the QEMU cpus. 'tb_size' is the size
568 (in bytes) allocated to the translation buffer. Zero means default
569 size. */
570 void tcg_exec_init(unsigned long tb_size)
571 {
572 cpu_gen_init();
573 code_gen_alloc(tb_size);
574 code_gen_ptr = code_gen_buffer;
575 page_init();
576 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
577 /* There's no guest base to take into account, so go ahead and
578 initialize the prologue now. */
579 tcg_prologue_init(&tcg_ctx);
580 #endif
581 }
582
583 bool tcg_enabled(void)
584 {
585 return code_gen_buffer != NULL;
586 }
587
588 void cpu_exec_init_all(void)
589 {
590 #if !defined(CONFIG_USER_ONLY)
591 memory_map_init();
592 io_mem_init();
593 #endif
594 }
595
596 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
597
598 static int cpu_common_post_load(void *opaque, int version_id)
599 {
600 CPUState *env = opaque;
601
602 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
603 version_id is increased. */
604 env->interrupt_request &= ~0x01;
605 tlb_flush(env, 1);
606
607 return 0;
608 }
609
610 static const VMStateDescription vmstate_cpu_common = {
611 .name = "cpu_common",
612 .version_id = 1,
613 .minimum_version_id = 1,
614 .minimum_version_id_old = 1,
615 .post_load = cpu_common_post_load,
616 .fields = (VMStateField []) {
617 VMSTATE_UINT32(halted, CPUState),
618 VMSTATE_UINT32(interrupt_request, CPUState),
619 VMSTATE_END_OF_LIST()
620 }
621 };
622 #endif
623
624 CPUState *qemu_get_cpu(int cpu)
625 {
626 CPUState *env = first_cpu;
627
628 while (env) {
629 if (env->cpu_index == cpu)
630 break;
631 env = env->next_cpu;
632 }
633
634 return env;
635 }
636
637 void cpu_exec_init(CPUState *env)
638 {
639 CPUState **penv;
640 int cpu_index;
641
642 #if defined(CONFIG_USER_ONLY)
643 cpu_list_lock();
644 #endif
645 env->next_cpu = NULL;
646 penv = &first_cpu;
647 cpu_index = 0;
648 while (*penv != NULL) {
649 penv = &(*penv)->next_cpu;
650 cpu_index++;
651 }
652 env->cpu_index = cpu_index;
653 env->numa_node = 0;
654 QTAILQ_INIT(&env->breakpoints);
655 QTAILQ_INIT(&env->watchpoints);
656 #ifndef CONFIG_USER_ONLY
657 env->thread_id = qemu_get_thread_id();
658 #endif
659 *penv = env;
660 #if defined(CONFIG_USER_ONLY)
661 cpu_list_unlock();
662 #endif
663 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
664 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
665 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
666 cpu_save, cpu_load, env);
667 #endif
668 }
669
670 /* Allocate a new translation block. Flush the translation buffer if
671 too many translation blocks or too much generated code. */
672 static TranslationBlock *tb_alloc(target_ulong pc)
673 {
674 TranslationBlock *tb;
675
676 if (nb_tbs >= code_gen_max_blocks ||
677 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
678 return NULL;
679 tb = &tbs[nb_tbs++];
680 tb->pc = pc;
681 tb->cflags = 0;
682 return tb;
683 }
684
685 void tb_free(TranslationBlock *tb)
686 {
687 /* In practice this is mostly used for single-use temporary TBs.
688 Ignore the hard cases and just back up if this TB happens to
689 be the last one generated. */
690 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
691 code_gen_ptr = tb->tc_ptr;
692 nb_tbs--;
693 }
694 }
695
696 static inline void invalidate_page_bitmap(PageDesc *p)
697 {
698 if (p->code_bitmap) {
699 g_free(p->code_bitmap);
700 p->code_bitmap = NULL;
701 }
702 p->code_write_count = 0;
703 }
704
705 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
706
707 static void page_flush_tb_1 (int level, void **lp)
708 {
709 int i;
710
711 if (*lp == NULL) {
712 return;
713 }
714 if (level == 0) {
715 PageDesc *pd = *lp;
716 for (i = 0; i < L2_SIZE; ++i) {
717 pd[i].first_tb = NULL;
718 invalidate_page_bitmap(pd + i);
719 }
720 } else {
721 void **pp = *lp;
722 for (i = 0; i < L2_SIZE; ++i) {
723 page_flush_tb_1 (level - 1, pp + i);
724 }
725 }
726 }
727
728 static void page_flush_tb(void)
729 {
730 int i;
731 for (i = 0; i < V_L1_SIZE; i++) {
732 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
733 }
734 }
735
736 /* flush all the translation blocks */
737 /* XXX: tb_flush is currently not thread safe */
738 void tb_flush(CPUState *env1)
739 {
740 CPUState *env;
741 #if defined(DEBUG_FLUSH)
742 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
743 (unsigned long)(code_gen_ptr - code_gen_buffer),
744 nb_tbs, nb_tbs > 0 ?
745 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
746 #endif
747 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
748 cpu_abort(env1, "Internal error: code buffer overflow\n");
749
750 nb_tbs = 0;
751
752 for(env = first_cpu; env != NULL; env = env->next_cpu) {
753 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
754 }
755
756 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
757 page_flush_tb();
758
759 code_gen_ptr = code_gen_buffer;
760 /* XXX: flush processor icache at this point if cache flush is
761 expensive */
762 tb_flush_count++;
763 }
764
765 #ifdef DEBUG_TB_CHECK
766
767 static void tb_invalidate_check(target_ulong address)
768 {
769 TranslationBlock *tb;
770 int i;
771 address &= TARGET_PAGE_MASK;
772 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
773 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
774 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
775 address >= tb->pc + tb->size)) {
776 printf("ERROR invalidate: address=" TARGET_FMT_lx
777 " PC=%08lx size=%04x\n",
778 address, (long)tb->pc, tb->size);
779 }
780 }
781 }
782 }
783
784 /* verify that all the pages have correct rights for code */
785 static void tb_page_check(void)
786 {
787 TranslationBlock *tb;
788 int i, flags1, flags2;
789
790 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
791 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
792 flags1 = page_get_flags(tb->pc);
793 flags2 = page_get_flags(tb->pc + tb->size - 1);
794 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
795 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
796 (long)tb->pc, tb->size, flags1, flags2);
797 }
798 }
799 }
800 }
801
802 #endif
803
804 /* invalidate one TB */
805 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
806 int next_offset)
807 {
808 TranslationBlock *tb1;
809 for(;;) {
810 tb1 = *ptb;
811 if (tb1 == tb) {
812 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
813 break;
814 }
815 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
816 }
817 }
818
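/* Note on the tagged pointers used below: pointers stored in
   PageDesc.first_tb, tb->page_next[] and the jump lists carry extra
   information in their low two bits.  For page lists the tag n (0 or 1)
   selects which of the TB's two possible pages the link belongs to; in the
   circular jump lists a tag of 2 marks the owning TB (tb->jmp_first), i.e.
   the end of a traversal.  Masking with ~3 recovers the real
   TranslationBlock pointer. */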
819 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
820 {
821 TranslationBlock *tb1;
822 unsigned int n1;
823
824 for(;;) {
825 tb1 = *ptb;
826 n1 = (long)tb1 & 3;
827 tb1 = (TranslationBlock *)((long)tb1 & ~3);
828 if (tb1 == tb) {
829 *ptb = tb1->page_next[n1];
830 break;
831 }
832 ptb = &tb1->page_next[n1];
833 }
834 }
835
836 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
837 {
838 TranslationBlock *tb1, **ptb;
839 unsigned int n1;
840
841 ptb = &tb->jmp_next[n];
842 tb1 = *ptb;
843 if (tb1) {
844 /* find tb(n) in circular list */
845 for(;;) {
846 tb1 = *ptb;
847 n1 = (long)tb1 & 3;
848 tb1 = (TranslationBlock *)((long)tb1 & ~3);
849 if (n1 == n && tb1 == tb)
850 break;
851 if (n1 == 2) {
852 ptb = &tb1->jmp_first;
853 } else {
854 ptb = &tb1->jmp_next[n1];
855 }
856 }
857 /* now we can remove tb(n) from the list */
858 *ptb = tb->jmp_next[n];
859
860 tb->jmp_next[n] = NULL;
861 }
862 }
863
864 /* reset the jump entry 'n' of a TB so that it is not chained to
865 another TB */
866 static inline void tb_reset_jump(TranslationBlock *tb, int n)
867 {
868 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
869 }
870
871 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
872 {
873 CPUState *env;
874 PageDesc *p;
875 unsigned int h, n1;
876 tb_page_addr_t phys_pc;
877 TranslationBlock *tb1, *tb2;
878
879 /* remove the TB from the hash list */
880 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
881 h = tb_phys_hash_func(phys_pc);
882 tb_remove(&tb_phys_hash[h], tb,
883 offsetof(TranslationBlock, phys_hash_next));
884
885 /* remove the TB from the page list */
886 if (tb->page_addr[0] != page_addr) {
887 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
888 tb_page_remove(&p->first_tb, tb);
889 invalidate_page_bitmap(p);
890 }
891 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
892 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
893 tb_page_remove(&p->first_tb, tb);
894 invalidate_page_bitmap(p);
895 }
896
897 tb_invalidated_flag = 1;
898
899 /* remove the TB from the hash list */
900 h = tb_jmp_cache_hash_func(tb->pc);
901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
902 if (env->tb_jmp_cache[h] == tb)
903 env->tb_jmp_cache[h] = NULL;
904 }
905
906 /* remove this TB from the two jump lists */
907 tb_jmp_remove(tb, 0);
908 tb_jmp_remove(tb, 1);
909
910 /* remove any remaining jumps to this TB */
911 tb1 = tb->jmp_first;
912 for(;;) {
913 n1 = (long)tb1 & 3;
914 if (n1 == 2)
915 break;
916 tb1 = (TranslationBlock *)((long)tb1 & ~3);
917 tb2 = tb1->jmp_next[n1];
918 tb_reset_jump(tb1, n1);
919 tb1->jmp_next[n1] = NULL;
920 tb1 = tb2;
921 }
922 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
923
924 tb_phys_invalidate_count++;
925 }
926
927 static inline void set_bits(uint8_t *tab, int start, int len)
928 {
929 int end, mask, end1;
930
931 end = start + len;
932 tab += start >> 3;
933 mask = 0xff << (start & 7);
934 if ((start & ~7) == (end & ~7)) {
935 if (start < end) {
936 mask &= ~(0xff << (end & 7));
937 *tab |= mask;
938 }
939 } else {
940 *tab++ |= mask;
941 start = (start + 8) & ~7;
942 end1 = end & ~7;
943 while (start < end1) {
944 *tab++ = 0xff;
945 start += 8;
946 }
947 if (start < end) {
948 mask = ~(0xff << (end & 7));
949 *tab |= mask;
950 }
951 }
952 }
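/* Worked example (hypothetical arguments): set_bits(tab, 5, 7) marks bits
   5..11.  start and end (= 12) fall in different bytes, so the first byte
   gets bits 5-7 set (mask 0xe0), no full bytes lie in between, and the
   second byte gets bits 0-3 set (mask ~(0xff << 4) = 0x0f). */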
953
954 static void build_page_bitmap(PageDesc *p)
955 {
956 int n, tb_start, tb_end;
957 TranslationBlock *tb;
958
959 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
960
961 tb = p->first_tb;
962 while (tb != NULL) {
963 n = (long)tb & 3;
964 tb = (TranslationBlock *)((long)tb & ~3);
965 /* NOTE: this is subtle as a TB may span two physical pages */
966 if (n == 0) {
967 /* NOTE: tb_end may be after the end of the page, but
968 it is not a problem */
969 tb_start = tb->pc & ~TARGET_PAGE_MASK;
970 tb_end = tb_start + tb->size;
971 if (tb_end > TARGET_PAGE_SIZE)
972 tb_end = TARGET_PAGE_SIZE;
973 } else {
974 tb_start = 0;
975 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
976 }
977 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
978 tb = tb->page_next[n];
979 }
980 }
981
982 TranslationBlock *tb_gen_code(CPUState *env,
983 target_ulong pc, target_ulong cs_base,
984 int flags, int cflags)
985 {
986 TranslationBlock *tb;
987 uint8_t *tc_ptr;
988 tb_page_addr_t phys_pc, phys_page2;
989 target_ulong virt_page2;
990 int code_gen_size;
991
992 phys_pc = get_page_addr_code(env, pc);
993 tb = tb_alloc(pc);
994 if (!tb) {
995 /* flush must be done */
996 tb_flush(env);
997 /* cannot fail at this point */
998 tb = tb_alloc(pc);
999 /* Don't forget to invalidate previous TB info. */
1000 tb_invalidated_flag = 1;
1001 }
1002 tc_ptr = code_gen_ptr;
1003 tb->tc_ptr = tc_ptr;
1004 tb->cs_base = cs_base;
1005 tb->flags = flags;
1006 tb->cflags = cflags;
1007 cpu_gen_code(env, tb, &code_gen_size);
1008 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1009
1010 /* check next page if needed */
1011 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1012 phys_page2 = -1;
1013 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1014 phys_page2 = get_page_addr_code(env, virt_page2);
1015 }
1016 tb_link_page(tb, phys_pc, phys_page2);
1017 return tb;
1018 }
1019
1020 /* invalidate all TBs which intersect with the target physical range
1021 [start, end). NOTE: start and end must refer to
1022 the same physical page. 'is_cpu_write_access' should be true if called
1023 from a real cpu write access: the virtual CPU will exit the current
1024 TB if code is modified inside this TB. */
1025 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1026 int is_cpu_write_access)
1027 {
1028 TranslationBlock *tb, *tb_next, *saved_tb;
1029 CPUState *env = cpu_single_env;
1030 tb_page_addr_t tb_start, tb_end;
1031 PageDesc *p;
1032 int n;
1033 #ifdef TARGET_HAS_PRECISE_SMC
1034 int current_tb_not_found = is_cpu_write_access;
1035 TranslationBlock *current_tb = NULL;
1036 int current_tb_modified = 0;
1037 target_ulong current_pc = 0;
1038 target_ulong current_cs_base = 0;
1039 int current_flags = 0;
1040 #endif /* TARGET_HAS_PRECISE_SMC */
1041
1042 p = page_find(start >> TARGET_PAGE_BITS);
1043 if (!p)
1044 return;
1045 if (!p->code_bitmap &&
1046 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1047 is_cpu_write_access) {
1048 /* build code bitmap */
1049 build_page_bitmap(p);
1050 }
1051
1052 /* we remove all the TBs in the range [start, end) */
1053 /* XXX: see if in some cases it could be faster to invalidate all the code */
1054 tb = p->first_tb;
1055 while (tb != NULL) {
1056 n = (long)tb & 3;
1057 tb = (TranslationBlock *)((long)tb & ~3);
1058 tb_next = tb->page_next[n];
1059 /* NOTE: this is subtle as a TB may span two physical pages */
1060 if (n == 0) {
1061 /* NOTE: tb_end may be after the end of the page, but
1062 it is not a problem */
1063 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1064 tb_end = tb_start + tb->size;
1065 } else {
1066 tb_start = tb->page_addr[1];
1067 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1068 }
1069 if (!(tb_end <= start || tb_start >= end)) {
1070 #ifdef TARGET_HAS_PRECISE_SMC
1071 if (current_tb_not_found) {
1072 current_tb_not_found = 0;
1073 current_tb = NULL;
1074 if (env->mem_io_pc) {
1075 /* now we have a real cpu fault */
1076 current_tb = tb_find_pc(env->mem_io_pc);
1077 }
1078 }
1079 if (current_tb == tb &&
1080 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1081 /* If we are modifying the current TB, we must stop
1082 its execution. We could be more precise by checking
1083 that the modification is after the current PC, but it
1084 would require a specialized function to partially
1085 restore the CPU state */
1086
1087 current_tb_modified = 1;
1088 cpu_restore_state(current_tb, env, env->mem_io_pc);
1089 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1090 &current_flags);
1091 }
1092 #endif /* TARGET_HAS_PRECISE_SMC */
1093 /* we need to do that to handle the case where a signal
1094 occurs while doing tb_phys_invalidate() */
1095 saved_tb = NULL;
1096 if (env) {
1097 saved_tb = env->current_tb;
1098 env->current_tb = NULL;
1099 }
1100 tb_phys_invalidate(tb, -1);
1101 if (env) {
1102 env->current_tb = saved_tb;
1103 if (env->interrupt_request && env->current_tb)
1104 cpu_interrupt(env, env->interrupt_request);
1105 }
1106 }
1107 tb = tb_next;
1108 }
1109 #if !defined(CONFIG_USER_ONLY)
1110 /* if no code remaining, no need to continue to use slow writes */
1111 if (!p->first_tb) {
1112 invalidate_page_bitmap(p);
1113 if (is_cpu_write_access) {
1114 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1115 }
1116 }
1117 #endif
1118 #ifdef TARGET_HAS_PRECISE_SMC
1119 if (current_tb_modified) {
1120 /* we generate a block containing just the instruction
1121 modifying the memory. It will ensure that it cannot modify
1122 itself */
1123 env->current_tb = NULL;
1124 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1125 cpu_resume_from_signal(env, NULL);
1126 }
1127 #endif
1128 }
1129
1130 /* len must be <= 8 and start must be a multiple of len */
1131 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1132 {
1133 PageDesc *p;
1134 int offset, b;
1135 #if 0
1136 if (1) {
1137 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1138 cpu_single_env->mem_io_vaddr, len,
1139 cpu_single_env->eip,
1140 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1141 }
1142 #endif
1143 p = page_find(start >> TARGET_PAGE_BITS);
1144 if (!p)
1145 return;
1146 if (p->code_bitmap) {
1147 offset = start & ~TARGET_PAGE_MASK;
1148 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1149 if (b & ((1 << len) - 1))
1150 goto do_invalidate;
1151 } else {
1152 do_invalidate:
1153 tb_invalidate_phys_page_range(start, start + len, 1);
1154 }
1155 }
1156
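/* Worked example for the bitmap fast path above (hypothetical write): a
   4-byte write at page offset 0x123 computes b = code_bitmap[0x24] >> 3 and
   tests its low 4 bits, i.e. the bits covering offsets 0x123..0x126; only
   if one of them is set is the slow invalidation path taken. */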
1157 #if !defined(CONFIG_SOFTMMU)
1158 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1159 unsigned long pc, void *puc)
1160 {
1161 TranslationBlock *tb;
1162 PageDesc *p;
1163 int n;
1164 #ifdef TARGET_HAS_PRECISE_SMC
1165 TranslationBlock *current_tb = NULL;
1166 CPUState *env = cpu_single_env;
1167 int current_tb_modified = 0;
1168 target_ulong current_pc = 0;
1169 target_ulong current_cs_base = 0;
1170 int current_flags = 0;
1171 #endif
1172
1173 addr &= TARGET_PAGE_MASK;
1174 p = page_find(addr >> TARGET_PAGE_BITS);
1175 if (!p)
1176 return;
1177 tb = p->first_tb;
1178 #ifdef TARGET_HAS_PRECISE_SMC
1179 if (tb && pc != 0) {
1180 current_tb = tb_find_pc(pc);
1181 }
1182 #endif
1183 while (tb != NULL) {
1184 n = (long)tb & 3;
1185 tb = (TranslationBlock *)((long)tb & ~3);
1186 #ifdef TARGET_HAS_PRECISE_SMC
1187 if (current_tb == tb &&
1188 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1189 /* If we are modifying the current TB, we must stop
1190 its execution. We could be more precise by checking
1191 that the modification is after the current PC, but it
1192 would require a specialized function to partially
1193 restore the CPU state */
1194
1195 current_tb_modified = 1;
1196 cpu_restore_state(current_tb, env, pc);
1197 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1198 &current_flags);
1199 }
1200 #endif /* TARGET_HAS_PRECISE_SMC */
1201 tb_phys_invalidate(tb, addr);
1202 tb = tb->page_next[n];
1203 }
1204 p->first_tb = NULL;
1205 #ifdef TARGET_HAS_PRECISE_SMC
1206 if (current_tb_modified) {
1207 /* we generate a block containing just the instruction
1208 modifying the memory. It will ensure that it cannot modify
1209 itself */
1210 env->current_tb = NULL;
1211 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1212 cpu_resume_from_signal(env, puc);
1213 }
1214 #endif
1215 }
1216 #endif
1217
1218 /* add the tb in the target page and protect it if necessary */
1219 static inline void tb_alloc_page(TranslationBlock *tb,
1220 unsigned int n, tb_page_addr_t page_addr)
1221 {
1222 PageDesc *p;
1223 #ifndef CONFIG_USER_ONLY
1224 bool page_already_protected;
1225 #endif
1226
1227 tb->page_addr[n] = page_addr;
1228 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1229 tb->page_next[n] = p->first_tb;
1230 #ifndef CONFIG_USER_ONLY
1231 page_already_protected = p->first_tb != NULL;
1232 #endif
1233 p->first_tb = (TranslationBlock *)((long)tb | n);
1234 invalidate_page_bitmap(p);
1235
1236 #if defined(TARGET_HAS_SMC) || 1
1237
1238 #if defined(CONFIG_USER_ONLY)
1239 if (p->flags & PAGE_WRITE) {
1240 target_ulong addr;
1241 PageDesc *p2;
1242 int prot;
1243
1244 /* force the host page to be non-writable (writes will take a
1245 page fault + mprotect overhead) */
1246 page_addr &= qemu_host_page_mask;
1247 prot = 0;
1248 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1249 addr += TARGET_PAGE_SIZE) {
1250
1251 p2 = page_find (addr >> TARGET_PAGE_BITS);
1252 if (!p2)
1253 continue;
1254 prot |= p2->flags;
1255 p2->flags &= ~PAGE_WRITE;
1256 }
1257 mprotect(g2h(page_addr), qemu_host_page_size,
1258 (prot & PAGE_BITS) & ~PAGE_WRITE);
1259 #ifdef DEBUG_TB_INVALIDATE
1260 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1261 page_addr);
1262 #endif
1263 }
1264 #else
1265 /* if some code is already present, then the pages are already
1266 protected. So we handle the case where only the first TB is
1267 allocated in a physical page */
1268 if (!page_already_protected) {
1269 tlb_protect_code(page_addr);
1270 }
1271 #endif
1272
1273 #endif /* TARGET_HAS_SMC */
1274 }
1275
1276 /* add a new TB and link it to the physical page tables. phys_page2 is
1277 (-1) to indicate that only one page contains the TB. */
1278 void tb_link_page(TranslationBlock *tb,
1279 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1280 {
1281 unsigned int h;
1282 TranslationBlock **ptb;
1283
1284 /* Grab the mmap lock to stop another thread invalidating this TB
1285 before we are done. */
1286 mmap_lock();
1287 /* add in the physical hash table */
1288 h = tb_phys_hash_func(phys_pc);
1289 ptb = &tb_phys_hash[h];
1290 tb->phys_hash_next = *ptb;
1291 *ptb = tb;
1292
1293 /* add in the page list */
1294 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1295 if (phys_page2 != -1)
1296 tb_alloc_page(tb, 1, phys_page2);
1297 else
1298 tb->page_addr[1] = -1;
1299
1300 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1301 tb->jmp_next[0] = NULL;
1302 tb->jmp_next[1] = NULL;
1303
1304 /* init original jump addresses */
1305 if (tb->tb_next_offset[0] != 0xffff)
1306 tb_reset_jump(tb, 0);
1307 if (tb->tb_next_offset[1] != 0xffff)
1308 tb_reset_jump(tb, 1);
1309
1310 #ifdef DEBUG_TB_CHECK
1311 tb_page_check();
1312 #endif
1313 mmap_unlock();
1314 }
1315
1316 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1317 tb[1].tc_ptr. Return NULL if not found */
1318 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1319 {
1320 int m_min, m_max, m;
1321 unsigned long v;
1322 TranslationBlock *tb;
1323
1324 if (nb_tbs <= 0)
1325 return NULL;
1326 if (tc_ptr < (unsigned long)code_gen_buffer ||
1327 tc_ptr >= (unsigned long)code_gen_ptr)
1328 return NULL;
1329 /* binary search (cf Knuth) */
1330 m_min = 0;
1331 m_max = nb_tbs - 1;
1332 while (m_min <= m_max) {
1333 m = (m_min + m_max) >> 1;
1334 tb = &tbs[m];
1335 v = (unsigned long)tb->tc_ptr;
1336 if (v == tc_ptr)
1337 return tb;
1338 else if (tc_ptr < v) {
1339 m_max = m - 1;
1340 } else {
1341 m_min = m + 1;
1342 }
1343 }
1344 return &tbs[m_max];
1345 }
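/* Note: the binary search above relies on tbs[] being ordered by tc_ptr,
   which holds because code_gen_ptr only grows between flushes; when no exact
   match is found, &tbs[m_max] is the block whose generated code starts just
   before tc_ptr, i.e. the block containing it. */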
1346
1347 static void tb_reset_jump_recursive(TranslationBlock *tb);
1348
1349 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1350 {
1351 TranslationBlock *tb1, *tb_next, **ptb;
1352 unsigned int n1;
1353
1354 tb1 = tb->jmp_next[n];
1355 if (tb1 != NULL) {
1356 /* find head of list */
1357 for(;;) {
1358 n1 = (long)tb1 & 3;
1359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1360 if (n1 == 2)
1361 break;
1362 tb1 = tb1->jmp_next[n1];
1363 }
1364 /* we are now sure that tb jumps to tb1 */
1365 tb_next = tb1;
1366
1367 /* remove tb from the jmp_first list */
1368 ptb = &tb_next->jmp_first;
1369 for(;;) {
1370 tb1 = *ptb;
1371 n1 = (long)tb1 & 3;
1372 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1373 if (n1 == n && tb1 == tb)
1374 break;
1375 ptb = &tb1->jmp_next[n1];
1376 }
1377 *ptb = tb->jmp_next[n];
1378 tb->jmp_next[n] = NULL;
1379
1380 /* remove the jump to the next tb in the generated code */
1381 tb_reset_jump(tb, n);
1382
1383 /* recursively remove jumps in the tb we would have jumped to */
1384 tb_reset_jump_recursive(tb_next);
1385 }
1386 }
1387
1388 static void tb_reset_jump_recursive(TranslationBlock *tb)
1389 {
1390 tb_reset_jump_recursive2(tb, 0);
1391 tb_reset_jump_recursive2(tb, 1);
1392 }
1393
1394 #if defined(TARGET_HAS_ICE)
1395 #if defined(CONFIG_USER_ONLY)
1396 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1397 {
1398 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1399 }
1400 #else
1401 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1402 {
1403 target_phys_addr_t addr;
1404 target_ulong pd;
1405 ram_addr_t ram_addr;
1406 PhysPageDesc p;
1407
1408 addr = cpu_get_phys_page_debug(env, pc);
1409 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1410 pd = p.phys_offset;
1411 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1412 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1413 }
1414 #endif
1415 #endif /* TARGET_HAS_ICE */
1416
1417 #if defined(CONFIG_USER_ONLY)
1418 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1419
1420 {
1421 }
1422
1423 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424 int flags, CPUWatchpoint **watchpoint)
1425 {
1426 return -ENOSYS;
1427 }
1428 #else
1429 /* Add a watchpoint. */
1430 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431 int flags, CPUWatchpoint **watchpoint)
1432 {
1433 target_ulong len_mask = ~(len - 1);
1434 CPUWatchpoint *wp;
1435
1436 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1438 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1439 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1440 return -EINVAL;
1441 }
1442 wp = g_malloc(sizeof(*wp));
1443
1444 wp->vaddr = addr;
1445 wp->len_mask = len_mask;
1446 wp->flags = flags;
1447
1448 /* keep all GDB-injected watchpoints in front */
1449 if (flags & BP_GDB)
1450 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1451 else
1452 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1453
1454 tlb_flush_page(env, addr);
1455
1456 if (watchpoint)
1457 *watchpoint = wp;
1458 return 0;
1459 }
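/* Illustrative use (hypothetical values): a 4-byte watchpoint at a
   4-byte-aligned address, e.g.
       cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB | BP_MEM_WRITE, NULL);
   stores len_mask = ~3, so the watchpoint covers 0x1000..0x1003, while an
   unaligned request such as (0x1001, 4, ...) is rejected with -EINVAL. */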
1460
1461 /* Remove a specific watchpoint. */
1462 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1463 int flags)
1464 {
1465 target_ulong len_mask = ~(len - 1);
1466 CPUWatchpoint *wp;
1467
1468 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1469 if (addr == wp->vaddr && len_mask == wp->len_mask
1470 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1471 cpu_watchpoint_remove_by_ref(env, wp);
1472 return 0;
1473 }
1474 }
1475 return -ENOENT;
1476 }
1477
1478 /* Remove a specific watchpoint by reference. */
1479 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1480 {
1481 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1482
1483 tlb_flush_page(env, watchpoint->vaddr);
1484
1485 g_free(watchpoint);
1486 }
1487
1488 /* Remove all matching watchpoints. */
1489 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1490 {
1491 CPUWatchpoint *wp, *next;
1492
1493 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1494 if (wp->flags & mask)
1495 cpu_watchpoint_remove_by_ref(env, wp);
1496 }
1497 }
1498 #endif
1499
1500 /* Add a breakpoint. */
1501 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1502 CPUBreakpoint **breakpoint)
1503 {
1504 #if defined(TARGET_HAS_ICE)
1505 CPUBreakpoint *bp;
1506
1507 bp = g_malloc(sizeof(*bp));
1508
1509 bp->pc = pc;
1510 bp->flags = flags;
1511
1512 /* keep all GDB-injected breakpoints in front */
1513 if (flags & BP_GDB)
1514 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1515 else
1516 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1517
1518 breakpoint_invalidate(env, pc);
1519
1520 if (breakpoint)
1521 *breakpoint = bp;
1522 return 0;
1523 #else
1524 return -ENOSYS;
1525 #endif
1526 }
1527
1528 /* Remove a specific breakpoint. */
1529 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1530 {
1531 #if defined(TARGET_HAS_ICE)
1532 CPUBreakpoint *bp;
1533
1534 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1535 if (bp->pc == pc && bp->flags == flags) {
1536 cpu_breakpoint_remove_by_ref(env, bp);
1537 return 0;
1538 }
1539 }
1540 return -ENOENT;
1541 #else
1542 return -ENOSYS;
1543 #endif
1544 }
1545
1546 /* Remove a specific breakpoint by reference. */
1547 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1548 {
1549 #if defined(TARGET_HAS_ICE)
1550 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1551
1552 breakpoint_invalidate(env, breakpoint->pc);
1553
1554 g_free(breakpoint);
1555 #endif
1556 }
1557
1558 /* Remove all matching breakpoints. */
1559 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1560 {
1561 #if defined(TARGET_HAS_ICE)
1562 CPUBreakpoint *bp, *next;
1563
1564 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1565 if (bp->flags & mask)
1566 cpu_breakpoint_remove_by_ref(env, bp);
1567 }
1568 #endif
1569 }
1570
1571 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1572 CPU loop after each instruction */
1573 void cpu_single_step(CPUState *env, int enabled)
1574 {
1575 #if defined(TARGET_HAS_ICE)
1576 if (env->singlestep_enabled != enabled) {
1577 env->singlestep_enabled = enabled;
1578 if (kvm_enabled())
1579 kvm_update_guest_debug(env, 0);
1580 else {
1581 /* must flush all the translated code to avoid inconsistencies */
1582 /* XXX: only flush what is necessary */
1583 tb_flush(env);
1584 }
1585 }
1586 #endif
1587 }
1588
1589 /* enable or disable low-level logging */
1590 void cpu_set_log(int log_flags)
1591 {
1592 loglevel = log_flags;
1593 if (loglevel && !logfile) {
1594 logfile = fopen(logfilename, log_append ? "a" : "w");
1595 if (!logfile) {
1596 perror(logfilename);
1597 _exit(1);
1598 }
1599 #if !defined(CONFIG_SOFTMMU)
1600 /* must avoid glibc using mmap() for the stdio buffer, so set one "by hand" */
1601 {
1602 static char logfile_buf[4096];
1603 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1604 }
1605 #elif defined(_WIN32)
1606 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1607 setvbuf(logfile, NULL, _IONBF, 0);
1608 #else
1609 setvbuf(logfile, NULL, _IOLBF, 0);
1610 #endif
1611 log_append = 1;
1612 }
1613 if (!loglevel && logfile) {
1614 fclose(logfile);
1615 logfile = NULL;
1616 }
1617 }
1618
1619 void cpu_set_log_filename(const char *filename)
1620 {
1621 logfilename = strdup(filename);
1622 if (logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
1625 }
1626 cpu_set_log(loglevel);
1627 }
1628
1629 static void cpu_unlink_tb(CPUState *env)
1630 {
1631 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1632 problem and hope the cpu will stop of its own accord. For userspace
1633 emulation this often isn't actually as bad as it sounds. Often
1634 signals are used primarily to interrupt blocking syscalls. */
1635 TranslationBlock *tb;
1636 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1637
1638 spin_lock(&interrupt_lock);
1639 tb = env->current_tb;
1640 /* if the cpu is currently executing code, we must unlink it and
1641 all the potentially executing TB */
1642 if (tb) {
1643 env->current_tb = NULL;
1644 tb_reset_jump_recursive(tb);
1645 }
1646 spin_unlock(&interrupt_lock);
1647 }
1648
1649 #ifndef CONFIG_USER_ONLY
1650 /* mask must never be zero, except for A20 change call */
1651 static void tcg_handle_interrupt(CPUState *env, int mask)
1652 {
1653 int old_mask;
1654
1655 old_mask = env->interrupt_request;
1656 env->interrupt_request |= mask;
1657
1658 /*
1659 * If called from iothread context, wake the target cpu in
1660 * case it is halted.
1661 */
1662 if (!qemu_cpu_is_self(env)) {
1663 qemu_cpu_kick(env);
1664 return;
1665 }
1666
1667 if (use_icount) {
1668 env->icount_decr.u16.high = 0xffff;
1669 if (!can_do_io(env)
1670 && (mask & ~old_mask) != 0) {
1671 cpu_abort(env, "Raised interrupt while not in I/O function");
1672 }
1673 } else {
1674 cpu_unlink_tb(env);
1675 }
1676 }
1677
1678 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1679
1680 #else /* CONFIG_USER_ONLY */
1681
1682 void cpu_interrupt(CPUState *env, int mask)
1683 {
1684 env->interrupt_request |= mask;
1685 cpu_unlink_tb(env);
1686 }
1687 #endif /* CONFIG_USER_ONLY */
1688
1689 void cpu_reset_interrupt(CPUState *env, int mask)
1690 {
1691 env->interrupt_request &= ~mask;
1692 }
1693
1694 void cpu_exit(CPUState *env)
1695 {
1696 env->exit_request = 1;
1697 cpu_unlink_tb(env);
1698 }
1699
1700 const CPULogItem cpu_log_items[] = {
1701 { CPU_LOG_TB_OUT_ASM, "out_asm",
1702 "show generated host assembly code for each compiled TB" },
1703 { CPU_LOG_TB_IN_ASM, "in_asm",
1704 "show target assembly code for each compiled TB" },
1705 { CPU_LOG_TB_OP, "op",
1706 "show micro ops for each compiled TB" },
1707 { CPU_LOG_TB_OP_OPT, "op_opt",
1708 "show micro ops "
1709 #ifdef TARGET_I386
1710 "before eflags optimization and "
1711 #endif
1712 "after liveness analysis" },
1713 { CPU_LOG_INT, "int",
1714 "show interrupts/exceptions in short format" },
1715 { CPU_LOG_EXEC, "exec",
1716 "show trace before each executed TB (lots of logs)" },
1717 { CPU_LOG_TB_CPU, "cpu",
1718 "show CPU state before block translation" },
1719 #ifdef TARGET_I386
1720 { CPU_LOG_PCALL, "pcall",
1721 "show protected mode far calls/returns/exceptions" },
1722 { CPU_LOG_RESET, "cpu_reset",
1723 "show CPU state before CPU resets" },
1724 #endif
1725 #ifdef DEBUG_IOPORT
1726 { CPU_LOG_IOPORT, "ioport",
1727 "show all i/o ports accesses" },
1728 #endif
1729 { 0, NULL, NULL },
1730 };
1731
1732 static int cmp1(const char *s1, int n, const char *s2)
1733 {
1734 if (strlen(s2) != n)
1735 return 0;
1736 return memcmp(s1, s2, n) == 0;
1737 }
1738
1739 /* Takes a comma-separated list of log masks. Returns 0 on error. */
1740 int cpu_str_to_log_mask(const char *str)
1741 {
1742 const CPULogItem *item;
1743 int mask;
1744 const char *p, *p1;
1745
1746 p = str;
1747 mask = 0;
1748 for(;;) {
1749 p1 = strchr(p, ',');
1750 if (!p1)
1751 p1 = p + strlen(p);
1752 if(cmp1(p,p1-p,"all")) {
1753 for(item = cpu_log_items; item->mask != 0; item++) {
1754 mask |= item->mask;
1755 }
1756 } else {
1757 for(item = cpu_log_items; item->mask != 0; item++) {
1758 if (cmp1(p, p1 - p, item->name))
1759 goto found;
1760 }
1761 return 0;
1762 }
1763 found:
1764 mask |= item->mask;
1765 if (*p1 != ',')
1766 break;
1767 p = p1 + 1;
1768 }
1769 return mask;
1770 }
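/* Illustrative use (hypothetical string): cpu_str_to_log_mask("in_asm,exec")
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" selects every entry in
   cpu_log_items, and an unknown name such as "bogus" makes the whole call
   return 0. */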
1771
1772 void cpu_abort(CPUState *env, const char *fmt, ...)
1773 {
1774 va_list ap;
1775 va_list ap2;
1776
1777 va_start(ap, fmt);
1778 va_copy(ap2, ap);
1779 fprintf(stderr, "qemu: fatal: ");
1780 vfprintf(stderr, fmt, ap);
1781 fprintf(stderr, "\n");
1782 #ifdef TARGET_I386
1783 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1784 #else
1785 cpu_dump_state(env, stderr, fprintf, 0);
1786 #endif
1787 if (qemu_log_enabled()) {
1788 qemu_log("qemu: fatal: ");
1789 qemu_log_vprintf(fmt, ap2);
1790 qemu_log("\n");
1791 #ifdef TARGET_I386
1792 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1793 #else
1794 log_cpu_state(env, 0);
1795 #endif
1796 qemu_log_flush();
1797 qemu_log_close();
1798 }
1799 va_end(ap2);
1800 va_end(ap);
1801 #if defined(CONFIG_USER_ONLY)
1802 {
1803 struct sigaction act;
1804 sigfillset(&act.sa_mask);
1805 act.sa_handler = SIG_DFL;
1806 sigaction(SIGABRT, &act, NULL);
1807 }
1808 #endif
1809 abort();
1810 }
1811
1812 CPUState *cpu_copy(CPUState *env)
1813 {
1814 CPUState *new_env = cpu_init(env->cpu_model_str);
1815 CPUState *next_cpu = new_env->next_cpu;
1816 int cpu_index = new_env->cpu_index;
1817 #if defined(TARGET_HAS_ICE)
1818 CPUBreakpoint *bp;
1819 CPUWatchpoint *wp;
1820 #endif
1821
1822 memcpy(new_env, env, sizeof(CPUState));
1823
1824 /* Preserve chaining and index. */
1825 new_env->next_cpu = next_cpu;
1826 new_env->cpu_index = cpu_index;
1827
1828 /* Clone all break/watchpoints.
1829 Note: Once we support ptrace with hw-debug register access, make sure
1830 BP_CPU break/watchpoints are handled correctly on clone. */
1831 QTAILQ_INIT(&new_env->breakpoints);
1832 QTAILQ_INIT(&new_env->watchpoints);
1833 #if defined(TARGET_HAS_ICE)
1834 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1835 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1836 }
1837 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1838 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1839 wp->flags, NULL);
1840 }
1841 #endif
1842
1843 return new_env;
1844 }
1845
1846 #if !defined(CONFIG_USER_ONLY)
1847
1848 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1849 {
1850 unsigned int i;
1851
1852 /* Discard jump cache entries for any tb which might potentially
1853 overlap the flushed page. */
1854 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1855 memset (&env->tb_jmp_cache[i], 0,
1856 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1857
1858 i = tb_jmp_cache_hash_page(addr);
1859 memset (&env->tb_jmp_cache[i], 0,
1860 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1861 }
1862
1863 static CPUTLBEntry s_cputlb_empty_entry = {
1864 .addr_read = -1,
1865 .addr_write = -1,
1866 .addr_code = -1,
1867 .addend = -1,
1868 };
1869
1870 /* NOTE:
1871 * If flush_global is true (the usual case), flush all tlb entries.
1872 * If flush_global is false, flush (at least) all tlb entries not
1873 * marked global.
1874 *
1875 * Since QEMU doesn't currently implement a global/not-global flag
1876 * for tlb entries, at the moment tlb_flush() will also flush all
1877 * tlb entries in the flush_global == false case. This is OK because
1878 * CPU architectures generally permit an implementation to drop
1879 * entries from the TLB at any time, so flushing more entries than
1880 * required is only an efficiency issue, not a correctness issue.
1881 */
1882 void tlb_flush(CPUState *env, int flush_global)
1883 {
1884 int i;
1885
1886 #if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1888 #endif
1889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env->current_tb = NULL;
1892
1893 for(i = 0; i < CPU_TLB_SIZE; i++) {
1894 int mmu_idx;
1895 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1896 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1897 }
1898 }
1899
1900 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1901
1902 env->tlb_flush_addr = -1;
1903 env->tlb_flush_mask = 0;
1904 tlb_flush_count++;
1905 }
1906
1907 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1908 {
1909 if (addr == (tlb_entry->addr_read &
1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1911 addr == (tlb_entry->addr_write &
1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1913 addr == (tlb_entry->addr_code &
1914 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1915 *tlb_entry = s_cputlb_empty_entry;
1916 }
1917 }
1918
1919 void tlb_flush_page(CPUState *env, target_ulong addr)
1920 {
1921 int i;
1922 int mmu_idx;
1923
1924 #if defined(DEBUG_TLB)
1925 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1926 #endif
1927 /* Check if we need to flush due to large pages. */
1928 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1929 #if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1932 env->tlb_flush_addr, env->tlb_flush_mask);
1933 #endif
1934 tlb_flush(env, 1);
1935 return;
1936 }
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
1941 addr &= TARGET_PAGE_MASK;
1942 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1945
1946 tlb_flush_jmp_cache(env, addr);
1947 }
1948
1949 /* update the TLBs so that writes to code in the virtual page 'addr'
1950 can be detected */
1951 static void tlb_protect_code(ram_addr_t ram_addr)
1952 {
1953 cpu_physical_memory_reset_dirty(ram_addr,
1954 ram_addr + TARGET_PAGE_SIZE,
1955 CODE_DIRTY_FLAG);
1956 }
1957
1958 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1959 tested for self modifying code */
1960 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1961 target_ulong vaddr)
1962 {
1963 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1964 }
1965
1966 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1967 unsigned long start, unsigned long length)
1968 {
1969 unsigned long addr;
1970 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
1971 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1972 if ((addr - start) < length) {
1973 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1974 }
1975 }
1976 }
1977
1978 /* Note: start and end must be within the same ram block. */
1979 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1980 int dirty_flags)
1981 {
1982 CPUState *env;
1983 unsigned long length, start1;
1984 int i;
1985
1986 start &= TARGET_PAGE_MASK;
1987 end = TARGET_PAGE_ALIGN(end);
1988
1989 length = end - start;
1990 if (length == 0)
1991 return;
1992 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1993
1994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
1996 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1997 /* Check that we don't span multiple blocks - this breaks the
1998 address comparisons below. */
1999 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2000 != (end - 1) - start) {
2001 abort();
2002 }
2003
2004 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2005 int mmu_idx;
2006 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2007 for(i = 0; i < CPU_TLB_SIZE; i++)
2008 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2009 start1, length);
2010 }
2011 }
2012 }
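/* Illustrative sketch (editorial): after a dirty page has been sent to the
   destination, migration code can clear just that page's migration dirty
   bit so only subsequent guest writes mark it again.  MIGRATION_DIRTY_FLAG
   is assumed here to be one of the dirty_flags bits:

       cpu_physical_memory_reset_dirty(ram_addr,
                                       ram_addr + TARGET_PAGE_SIZE,
                                       MIGRATION_DIRTY_FLAG);

   tlb_protect_code() above does the same thing for CODE_DIRTY_FLAG. */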
2013
2014 int cpu_physical_memory_set_dirty_tracking(int enable)
2015 {
2016 int ret = 0;
2017 in_migration = enable;
2018 return ret;
2019 }
2020
2021 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2022 {
2023 ram_addr_t ram_addr;
2024 void *p;
2025
2026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
2029 ram_addr = qemu_ram_addr_from_host_nofail(p);
2030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2031 tlb_entry->addr_write |= TLB_NOTDIRTY;
2032 }
2033 }
2034 }
2035
2036 /* update the TLB according to the current state of the dirty bits */
2037 void cpu_tlb_update_dirty(CPUState *env)
2038 {
2039 int i;
2040 int mmu_idx;
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2044 }
2045 }
2046
2047 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2048 {
2049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
2051 }
2052
2053 /* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2056 {
2057 int i;
2058 int mmu_idx;
2059
2060 vaddr &= TARGET_PAGE_MASK;
2061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2064 }
2065
2066 /* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
2068 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2069 target_ulong size)
2070 {
2071 target_ulong mask = ~(size - 1);
2072
2073 if (env->tlb_flush_addr == (target_ulong)-1) {
2074 env->tlb_flush_addr = vaddr & mask;
2075 env->tlb_flush_mask = mask;
2076 return;
2077 }
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable size TLB. */
2081 mask &= env->tlb_flush_mask;
2082 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2083 mask <<= 1;
2084 }
2085 env->tlb_flush_addr &= mask;
2086 env->tlb_flush_mask = mask;
2087 }
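/* Worked example (editorial, assuming 4 KiB base pages and a 32-bit
   target_ulong): mapping a 2 MiB page at 0x00400000 records
   tlb_flush_addr = 0x00400000 and tlb_flush_mask = 0xffe00000.  Adding a
   second 2 MiB page at 0x00800000 makes the loop widen the mask until both
   addresses agree under it: 0xffe00000 -> 0xffc00000 -> 0xff800000 ->
   0xff000000.  The tracked region is then the 16 MiB block starting at 0,
   and any tlb_flush_page() inside it triggers a full flush. */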
2088
2089 static bool is_ram_rom(ram_addr_t pd)
2090 {
2091 pd &= ~TARGET_PAGE_MASK;
2092 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2093 }
2094
2095 static bool is_romd(ram_addr_t pd)
2096 {
2097 MemoryRegion *mr;
2098
2099 pd &= ~TARGET_PAGE_MASK;
2100 mr = io_mem_region[pd];
2101 return mr->rom_device && mr->readable;
2102 }
2103
2104 static bool is_ram_rom_romd(ram_addr_t pd)
2105 {
2106 return is_ram_rom(pd) || is_romd(pd);
2107 }
2108
2109 /* Add a new TLB entry. At most one entry for a given virtual address
2110 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2111 supplied size is only used by tlb_flush_page. */
2112 void tlb_set_page(CPUState *env, target_ulong vaddr,
2113 target_phys_addr_t paddr, int prot,
2114 int mmu_idx, target_ulong size)
2115 {
2116 PhysPageDesc p;
2117 unsigned long pd;
2118 unsigned int index;
2119 target_ulong address;
2120 target_ulong code_address;
2121 unsigned long addend;
2122 CPUTLBEntry *te;
2123 CPUWatchpoint *wp;
2124 target_phys_addr_t iotlb;
2125
2126 assert(size >= TARGET_PAGE_SIZE);
2127 if (size != TARGET_PAGE_SIZE) {
2128 tlb_add_large_page(env, vaddr, size);
2129 }
2130 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2131 pd = p.phys_offset;
2132 #if defined(DEBUG_TLB)
2133 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2134 " prot=%x idx=%d pd=0x%08lx\n",
2135 vaddr, paddr, prot, mmu_idx, pd);
2136 #endif
2137
2138 address = vaddr;
2139 if (!is_ram_rom_romd(pd)) {
2140 /* IO memory case (romd handled later) */
2141 address |= TLB_MMIO;
2142 }
2143 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2144 if (is_ram_rom(pd)) {
2145 /* Normal RAM. */
2146 iotlb = pd & TARGET_PAGE_MASK;
2147 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2148 iotlb |= io_mem_notdirty.ram_addr;
2149 else
2150 iotlb |= io_mem_rom.ram_addr;
2151 } else {
2152 /* IO handlers are currently passed a physical address.
2153 It would be nice to pass an offset from the base address
2154 of that region. This would avoid having to special case RAM,
2155 and avoid full address decoding in every device.
2156 We can't use the high bits of pd for this because
2157 IO_MEM_ROMD uses these as a ram address. */
2158 iotlb = (pd & ~TARGET_PAGE_MASK);
2159 iotlb += p.region_offset;
2160 }
2161
2162 code_address = address;
2163 /* Make accesses to pages with watchpoints go via the
2164 watchpoint trap routines. */
2165 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2166 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2167 /* Avoid trapping reads of pages with a write breakpoint. */
2168 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2169 iotlb = io_mem_watch.ram_addr + paddr;
2170 address |= TLB_MMIO;
2171 break;
2172 }
2173 }
2174 }
2175
2176 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2177 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2178 te = &env->tlb_table[mmu_idx][index];
2179 te->addend = addend - vaddr;
2180 if (prot & PAGE_READ) {
2181 te->addr_read = address;
2182 } else {
2183 te->addr_read = -1;
2184 }
2185
2186 if (prot & PAGE_EXEC) {
2187 te->addr_code = code_address;
2188 } else {
2189 te->addr_code = -1;
2190 }
2191 if (prot & PAGE_WRITE) {
2192 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2193 /* Write access calls the I/O callback. */
2194 te->addr_write = address | TLB_MMIO;
2195 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2196 !cpu_physical_memory_is_dirty(pd)) {
2197 te->addr_write = address | TLB_NOTDIRTY;
2198 } else {
2199 te->addr_write = address;
2200 }
2201 } else {
2202 te->addr_write = -1;
2203 }
2204 }
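/* Illustrative sketch (editorial): a target's tlb_fill() typically walks
   the guest page tables and then installs the translation with this
   function.  'env', 'vaddr', 'paddr', 'prot' and 'mmu_idx' below are
   assumed to come from that walk:

       tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                    paddr & TARGET_PAGE_MASK,
                    prot, mmu_idx, TARGET_PAGE_SIZE);

   Passing a size larger than TARGET_PAGE_SIZE only feeds
   tlb_add_large_page(); the installed entry itself still covers one page. */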
2205
2206 #else
2207
2208 void tlb_flush(CPUState *env, int flush_global)
2209 {
2210 }
2211
2212 void tlb_flush_page(CPUState *env, target_ulong addr)
2213 {
2214 }
2215
2216 /*
2217 * Walks guest process memory "regions" one by one
2218 * and calls callback function 'fn' for each region.
2219 */
2220
2221 struct walk_memory_regions_data
2222 {
2223 walk_memory_regions_fn fn;
2224 void *priv;
2225 unsigned long start;
2226 int prot;
2227 };
2228
2229 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2230 abi_ulong end, int new_prot)
2231 {
2232 if (data->start != -1ul) {
2233 int rc = data->fn(data->priv, data->start, end, data->prot);
2234 if (rc != 0) {
2235 return rc;
2236 }
2237 }
2238
2239 data->start = (new_prot ? end : -1ul);
2240 data->prot = new_prot;
2241
2242 return 0;
2243 }
2244
2245 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2246 abi_ulong base, int level, void **lp)
2247 {
2248 abi_ulong pa;
2249 int i, rc;
2250
2251 if (*lp == NULL) {
2252 return walk_memory_regions_end(data, base, 0);
2253 }
2254
2255 if (level == 0) {
2256 PageDesc *pd = *lp;
2257 for (i = 0; i < L2_SIZE; ++i) {
2258 int prot = pd[i].flags;
2259
2260 pa = base | (i << TARGET_PAGE_BITS);
2261 if (prot != data->prot) {
2262 rc = walk_memory_regions_end(data, pa, prot);
2263 if (rc != 0) {
2264 return rc;
2265 }
2266 }
2267 }
2268 } else {
2269 void **pp = *lp;
2270 for (i = 0; i < L2_SIZE; ++i) {
2271 pa = base | ((abi_ulong)i <<
2272 (TARGET_PAGE_BITS + L2_BITS * level));
2273 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2274 if (rc != 0) {
2275 return rc;
2276 }
2277 }
2278 }
2279
2280 return 0;
2281 }
2282
2283 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2284 {
2285 struct walk_memory_regions_data data;
2286 unsigned long i;
2287
2288 data.fn = fn;
2289 data.priv = priv;
2290 data.start = -1ul;
2291 data.prot = 0;
2292
2293 for (i = 0; i < V_L1_SIZE; i++) {
2294 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2295 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2296 if (rc != 0) {
2297 return rc;
2298 }
2299 }
2300
2301 return walk_memory_regions_end(&data, 0, 0);
2302 }
2303
2304 static int dump_region(void *priv, abi_ulong start,
2305 abi_ulong end, unsigned long prot)
2306 {
2307 FILE *f = (FILE *)priv;
2308
2309 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2310 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2311 start, end, end - start,
2312 ((prot & PAGE_READ) ? 'r' : '-'),
2313 ((prot & PAGE_WRITE) ? 'w' : '-'),
2314 ((prot & PAGE_EXEC) ? 'x' : '-'));
2315
2316 return (0);
2317 }
2318
2319 /* dump memory mappings */
2320 void page_dump(FILE *f)
2321 {
2322 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2323 "start", "end", "size", "prot");
2324 walk_memory_regions(f, dump_region);
2325 }
2326
2327 int page_get_flags(target_ulong address)
2328 {
2329 PageDesc *p;
2330
2331 p = page_find(address >> TARGET_PAGE_BITS);
2332 if (!p)
2333 return 0;
2334 return p->flags;
2335 }
2336
2337 /* Modify the flags of a page and invalidate the code if necessary.
2338 The flag PAGE_WRITE_ORG is positioned automatically depending
2339 on PAGE_WRITE. The mmap_lock should already be held. */
2340 void page_set_flags(target_ulong start, target_ulong end, int flags)
2341 {
2342 target_ulong addr, len;
2343
2344 /* This function should never be called with addresses outside the
2345 guest address space. If this assert fires, it probably indicates
2346 a missing call to h2g_valid. */
2347 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2348 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2349 #endif
2350 assert(start < end);
2351
2352 start = start & TARGET_PAGE_MASK;
2353 end = TARGET_PAGE_ALIGN(end);
2354
2355 if (flags & PAGE_WRITE) {
2356 flags |= PAGE_WRITE_ORG;
2357 }
2358
2359 for (addr = start, len = end - start;
2360 len != 0;
2361 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2362 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2363
2364 /* If the write protection bit is set, then we invalidate
2365 the code inside. */
2366 if (!(p->flags & PAGE_WRITE) &&
2367 (flags & PAGE_WRITE) &&
2368 p->first_tb) {
2369 tb_invalidate_phys_page(addr, 0, NULL);
2370 }
2371 p->flags = flags;
2372 }
2373 }
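/* Illustrative sketch (editorial): user-mode emulation calls this after a
   successful target mmap() or mprotect().  Assuming 'start' and 'len'
   describe the new mapping in guest addresses:

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   Because PAGE_WRITE is set, PAGE_WRITE_ORG is added automatically, and any
   page that contained translated code but was not previously writable has
   that code invalidated. */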
2374
2375 int page_check_range(target_ulong start, target_ulong len, int flags)
2376 {
2377 PageDesc *p;
2378 target_ulong end;
2379 target_ulong addr;
2380
2381 /* This function should never be called with addresses outside the
2382 guest address space. If this assert fires, it probably indicates
2383 a missing call to h2g_valid. */
2384 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2385 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2386 #endif
2387
2388 if (len == 0) {
2389 return 0;
2390 }
2391 if (start + len - 1 < start) {
2392 /* We've wrapped around. */
2393 return -1;
2394 }
2395
2396 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2397 start = start & TARGET_PAGE_MASK;
2398
2399 for (addr = start, len = end - start;
2400 len != 0;
2401 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2402 p = page_find(addr >> TARGET_PAGE_BITS);
2403 if (!p)
2404 return -1;
2405 if (!(p->flags & PAGE_VALID))
2406 return -1;
2407
2408 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2409 return -1;
2410 if (flags & PAGE_WRITE) {
2411 if (!(p->flags & PAGE_WRITE_ORG))
2412 return -1;
2413 /* unprotect the page if it was put read-only because it
2414 contains translated code */
2415 if (!(p->flags & PAGE_WRITE)) {
2416 if (!page_unprotect(addr, 0, NULL))
2417 return -1;
2418 }
2419 return 0;
2420 }
2421 }
2422 return 0;
2423 }
2424
2425 /* called from signal handler: invalidate the code and unprotect the
2426 page. Return TRUE if the fault was successfully handled. */
2427 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2428 {
2429 unsigned int prot;
2430 PageDesc *p;
2431 target_ulong host_start, host_end, addr;
2432
2433 /* Technically this isn't safe inside a signal handler. However we
2434 know this only ever happens in a synchronous SEGV handler, so in
2435 practice it seems to be ok. */
2436 mmap_lock();
2437
2438 p = page_find(address >> TARGET_PAGE_BITS);
2439 if (!p) {
2440 mmap_unlock();
2441 return 0;
2442 }
2443
2444 /* if the page was really writable, then we change its
2445 protection back to writable */
2446 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2447 host_start = address & qemu_host_page_mask;
2448 host_end = host_start + qemu_host_page_size;
2449
2450 prot = 0;
2451 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2452 p = page_find(addr >> TARGET_PAGE_BITS);
2453 p->flags |= PAGE_WRITE;
2454 prot |= p->flags;
2455
2456 /* and since the content will be modified, we must invalidate
2457 the corresponding translated code. */
2458 tb_invalidate_phys_page(addr, pc, puc);
2459 #ifdef DEBUG_TB_CHECK
2460 tb_invalidate_check(addr);
2461 #endif
2462 }
2463 mprotect((void *)g2h(host_start), qemu_host_page_size,
2464 prot & PAGE_BITS);
2465
2466 mmap_unlock();
2467 return 1;
2468 }
2469 mmap_unlock();
2470 return 0;
2471 }
2472
2473 static inline void tlb_set_dirty(CPUState *env,
2474 unsigned long addr, target_ulong vaddr)
2475 {
2476 }
2477 #endif /* defined(CONFIG_USER_ONLY) */
2478
2479 #if !defined(CONFIG_USER_ONLY)
2480
2481 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2482 typedef struct subpage_t {
2483 MemoryRegion iomem;
2484 target_phys_addr_t base;
2485 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2486 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2487 } subpage_t;
2488
2489 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2490 ram_addr_t memory, ram_addr_t region_offset);
2491 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2492 ram_addr_t orig_memory,
2493 ram_addr_t region_offset);
2494 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2495 need_subpage) \
2496 do { \
2497 if (addr > start_addr) \
2498 start_addr2 = 0; \
2499 else { \
2500 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2501 if (start_addr2 > 0) \
2502 need_subpage = 1; \
2503 } \
2504 \
2505 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2506 end_addr2 = TARGET_PAGE_SIZE - 1; \
2507 else { \
2508 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2509 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2510 need_subpage = 1; \
2511 } \
2512 } while (0)
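/* Worked example (editorial, assuming 4 KiB target pages): registering a
   0x400-byte region at start_addr 0x10000800 visits the page containing
   addr 0x10000800.  Since addr == start_addr, start_addr2 = 0x800 (> 0),
   and because the region ends inside the page, end_addr2 = 0xbff
   (< TARGET_PAGE_SIZE - 1); both tests set need_subpage, so a subpage_t is
   created and only bytes 0x800..0xbff of that page are redirected to the
   new region. */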
2513
2514 static void destroy_page_desc(PhysPageDesc pd)
2515 {
2516 unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
2517 MemoryRegion *mr = io_mem_region[io_index];
2518
2519 if (mr->subpage) {
2520 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2521 memory_region_destroy(&subpage->iomem);
2522 g_free(subpage);
2523 }
2524 }
2525
2526 static void destroy_l2_mapping(void **lp, unsigned level)
2527 {
2528 unsigned i;
2529 void **p;
2530 PhysPageDesc *pd;
2531
2532 if (!*lp) {
2533 return;
2534 }
2535
2536 if (level > 0) {
2537 p = *lp;
2538 for (i = 0; i < L2_SIZE; ++i) {
2539 destroy_l2_mapping(&p[i], level - 1);
2540 }
2541 g_free(p);
2542 } else {
2543 pd = *lp;
2544 for (i = 0; i < L2_SIZE; ++i) {
2545 destroy_page_desc(pd[i]);
2546 }
2547 g_free(pd);
2548 }
2549 *lp = NULL;
2550 }
2551
2552 static void destroy_all_mappings(void)
2553 {
2554 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2555 }
2556
2557 /* register physical memory.
2558 For RAM, 'size' must be a multiple of the target page size.
2559 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2560 io memory page. The address used when calling the IO function is
2561 the offset from the start of the region, plus region_offset. Both
2562 start_addr and region_offset are rounded down to a page boundary
2563 before calculating this offset. This should not be a problem unless
2564 the low bits of start_addr and region_offset differ. */
2565 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2566 bool readonly)
2567 {
2568 target_phys_addr_t start_addr = section->offset_within_address_space;
2569 ram_addr_t size = section->size;
2570 ram_addr_t phys_offset = section->mr->ram_addr;
2571 ram_addr_t region_offset = section->offset_within_region;
2572 target_phys_addr_t addr, end_addr;
2573 PhysPageDesc *p;
2574 CPUState *env;
2575 ram_addr_t orig_size = size;
2576 subpage_t *subpage;
2577
2578 if (memory_region_is_ram(section->mr)) {
2579 phys_offset += region_offset;
2580 region_offset = 0;
2581 }
2582
2583 if (readonly) {
2584 phys_offset |= io_mem_rom.ram_addr;
2585 }
2586
2587 assert(size);
2588
2589 if (phys_offset == io_mem_unassigned.ram_addr) {
2590 region_offset = start_addr;
2591 }
2592 region_offset &= TARGET_PAGE_MASK;
2593 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2594 end_addr = start_addr + (target_phys_addr_t)size;
2595
2596 addr = start_addr;
2597 do {
2598 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2599 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2600 ram_addr_t orig_memory = p->phys_offset;
2601 target_phys_addr_t start_addr2, end_addr2;
2602 int need_subpage = 0;
2603 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
2604
2605 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2606 need_subpage);
2607 if (need_subpage) {
2608 if (!(mr->subpage)) {
2609 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2610 &p->phys_offset, orig_memory,
2611 p->region_offset);
2612 } else {
2613 subpage = container_of(mr, subpage_t, iomem);
2614 }
2615 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2616 region_offset);
2617 p->region_offset = 0;
2618 } else {
2619 p->phys_offset = phys_offset;
2620 p->region_offset = region_offset;
2621 if (is_ram_rom_romd(phys_offset))
2622 phys_offset += TARGET_PAGE_SIZE;
2623 }
2624 } else {
2625 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2626 p->phys_offset = phys_offset;
2627 p->region_offset = region_offset;
2628 if (is_ram_rom_romd(phys_offset)) {
2629 phys_offset += TARGET_PAGE_SIZE;
2630 } else {
2631 target_phys_addr_t start_addr2, end_addr2;
2632 int need_subpage = 0;
2633
2634 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2635 end_addr2, need_subpage);
2636
2637 if (need_subpage) {
2638 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2639 &p->phys_offset,
2640 io_mem_unassigned.ram_addr,
2641 addr & TARGET_PAGE_MASK);
2642 subpage_register(subpage, start_addr2, end_addr2,
2643 phys_offset, region_offset);
2644 p->region_offset = 0;
2645 }
2646 }
2647 }
2648 region_offset += TARGET_PAGE_SIZE;
2649 addr += TARGET_PAGE_SIZE;
2650 } while (addr != end_addr);
2651
2652 /* since each CPU stores ram addresses in its TLB cache, we must
2653 reset the modified entries */
2654 /* XXX: slow ! */
2655 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2656 tlb_flush(env, 1);
2657 }
2658 }
2659
2660 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2661 {
2662 if (kvm_enabled())
2663 kvm_coalesce_mmio_region(addr, size);
2664 }
2665
2666 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2667 {
2668 if (kvm_enabled())
2669 kvm_uncoalesce_mmio_region(addr, size);
2670 }
2671
2672 void qemu_flush_coalesced_mmio_buffer(void)
2673 {
2674 if (kvm_enabled())
2675 kvm_flush_coalesced_mmio_buffer();
2676 }
2677
2678 #if defined(__linux__) && !defined(TARGET_S390X)
2679
2680 #include <sys/vfs.h>
2681
2682 #define HUGETLBFS_MAGIC 0x958458f6
2683
2684 static long gethugepagesize(const char *path)
2685 {
2686 struct statfs fs;
2687 int ret;
2688
2689 do {
2690 ret = statfs(path, &fs);
2691 } while (ret != 0 && errno == EINTR);
2692
2693 if (ret != 0) {
2694 perror(path);
2695 return 0;
2696 }
2697
2698 if (fs.f_type != HUGETLBFS_MAGIC)
2699 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2700
2701 return fs.f_bsize;
2702 }
2703
2704 static void *file_ram_alloc(RAMBlock *block,
2705 ram_addr_t memory,
2706 const char *path)
2707 {
2708 char *filename;
2709 void *area;
2710 int fd;
2711 #ifdef MAP_POPULATE
2712 int flags;
2713 #endif
2714 unsigned long hpagesize;
2715
2716 hpagesize = gethugepagesize(path);
2717 if (!hpagesize) {
2718 return NULL;
2719 }
2720
2721 if (memory < hpagesize) {
2722 return NULL;
2723 }
2724
2725 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2726 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2727 return NULL;
2728 }
2729
2730 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2731 return NULL;
2732 }
2733
2734 fd = mkstemp(filename);
2735 if (fd < 0) {
2736 perror("unable to create backing store for hugepages");
2737 free(filename);
2738 return NULL;
2739 }
2740 unlink(filename);
2741 free(filename);
2742
2743 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2744
2745 /*
2746 * ftruncate is not supported by hugetlbfs in older
2747 * hosts, so don't bother bailing out on errors.
2748 * If anything goes wrong with it under other filesystems,
2749 * mmap will fail.
2750 */
2751 if (ftruncate(fd, memory))
2752 perror("ftruncate");
2753
2754 #ifdef MAP_POPULATE
2755 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2756 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2757 * to sidestep this quirk.
2758 */
2759 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2760 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2761 #else
2762 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2763 #endif
2764 if (area == MAP_FAILED) {
2765 perror("file_ram_alloc: can't mmap RAM pages");
2766 close(fd);
2767 return (NULL);
2768 }
2769 block->fd = fd;
2770 return area;
2771 }
2772 #endif
2773
2774 static ram_addr_t find_ram_offset(ram_addr_t size)
2775 {
2776 RAMBlock *block, *next_block;
2777 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2778
2779 if (QLIST_EMPTY(&ram_list.blocks))
2780 return 0;
2781
2782 QLIST_FOREACH(block, &ram_list.blocks, next) {
2783 ram_addr_t end, next = RAM_ADDR_MAX;
2784
2785 end = block->offset + block->length;
2786
2787 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2788 if (next_block->offset >= end) {
2789 next = MIN(next, next_block->offset);
2790 }
2791 }
2792 if (next - end >= size && next - end < mingap) {
2793 offset = end;
2794 mingap = next - end;
2795 }
2796 }
2797
2798 if (offset == RAM_ADDR_MAX) {
2799 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2800 (uint64_t)size);
2801 abort();
2802 }
2803
2804 return offset;
2805 }
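/* Worked example (editorial): with two existing blocks covering
   [0x0, 0x1000) and [0x3000, 0x4000), a request for size 0x1000 considers
   the gap after the first block (next = 0x3000, gap 0x2000) and the
   unbounded space after the second; the first gap is the smallest one that
   fits, so the new block is placed at offset 0x1000. */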
2806
2807 static ram_addr_t last_ram_offset(void)
2808 {
2809 RAMBlock *block;
2810 ram_addr_t last = 0;
2811
2812 QLIST_FOREACH(block, &ram_list.blocks, next)
2813 last = MAX(last, block->offset + block->length);
2814
2815 return last;
2816 }
2817
2818 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2819 {
2820 RAMBlock *new_block, *block;
2821
2822 new_block = NULL;
2823 QLIST_FOREACH(block, &ram_list.blocks, next) {
2824 if (block->offset == addr) {
2825 new_block = block;
2826 break;
2827 }
2828 }
2829 assert(new_block);
2830 assert(!new_block->idstr[0]);
2831
2832 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2833 char *id = dev->parent_bus->info->get_dev_path(dev);
2834 if (id) {
2835 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2836 g_free(id);
2837 }
2838 }
2839 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2840
2841 QLIST_FOREACH(block, &ram_list.blocks, next) {
2842 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2843 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2844 new_block->idstr);
2845 abort();
2846 }
2847 }
2848 }
2849
2850 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2851 MemoryRegion *mr)
2852 {
2853 RAMBlock *new_block;
2854
2855 size = TARGET_PAGE_ALIGN(size);
2856 new_block = g_malloc0(sizeof(*new_block));
2857
2858 new_block->mr = mr;
2859 new_block->offset = find_ram_offset(size);
2860 if (host) {
2861 new_block->host = host;
2862 new_block->flags |= RAM_PREALLOC_MASK;
2863 } else {
2864 if (mem_path) {
2865 #if defined (__linux__) && !defined(TARGET_S390X)
2866 new_block->host = file_ram_alloc(new_block, size, mem_path);
2867 if (!new_block->host) {
2868 new_block->host = qemu_vmalloc(size);
2869 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2870 }
2871 #else
2872 fprintf(stderr, "-mem-path option unsupported\n");
2873 exit(1);
2874 #endif
2875 } else {
2876 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2877 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2878 a system-defined value, which is at least 256GB. Larger systems
2879 have larger values. We put the guest between the end of the data
2880 segment (system break) and this value. We use 32GB as a base to
2881 have enough room for the system break to grow. */
2882 new_block->host = mmap((void*)0x800000000, size,
2883 PROT_EXEC|PROT_READ|PROT_WRITE,
2884 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2885 if (new_block->host == MAP_FAILED) {
2886 fprintf(stderr, "Allocating RAM failed\n");
2887 abort();
2888 }
2889 #else
2890 if (xen_enabled()) {
2891 xen_ram_alloc(new_block->offset, size, mr);
2892 } else {
2893 new_block->host = qemu_vmalloc(size);
2894 }
2895 #endif
2896 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2897 }
2898 }
2899 new_block->length = size;
2900
2901 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2902
2903 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2904 last_ram_offset() >> TARGET_PAGE_BITS);
2905 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2906 0xff, size >> TARGET_PAGE_BITS);
2907
2908 if (kvm_enabled())
2909 kvm_setup_guest_memory(new_block->host, size);
2910
2911 return new_block->offset;
2912 }
2913
2914 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2915 {
2916 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2917 }
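/* Illustrative sketch (editorial): board or device code allocates guest RAM
   and can then obtain a host pointer into the new block.  'mr' is assumed
   to be an already-initialized MemoryRegion owned by the caller:

       ram_addr_t offset = qemu_ram_alloc(0x200000, mr);
       void *host = qemu_get_ram_ptr(offset);

   'host' addresses the start of the block; 'offset' is what
   qemu_ram_set_idstr() and qemu_ram_free() take. */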
2918
2919 void qemu_ram_free_from_ptr(ram_addr_t addr)
2920 {
2921 RAMBlock *block;
2922
2923 QLIST_FOREACH(block, &ram_list.blocks, next) {
2924 if (addr == block->offset) {
2925 QLIST_REMOVE(block, next);
2926 g_free(block);
2927 return;
2928 }
2929 }
2930 }
2931
2932 void qemu_ram_free(ram_addr_t addr)
2933 {
2934 RAMBlock *block;
2935
2936 QLIST_FOREACH(block, &ram_list.blocks, next) {
2937 if (addr == block->offset) {
2938 QLIST_REMOVE(block, next);
2939 if (block->flags & RAM_PREALLOC_MASK) {
2940 ;
2941 } else if (mem_path) {
2942 #if defined (__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944 munmap(block->host, block->length);
2945 close(block->fd);
2946 } else {
2947 qemu_vfree(block->host);
2948 }
2949 #else
2950 abort();
2951 #endif
2952 } else {
2953 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2954 munmap(block->host, block->length);
2955 #else
2956 if (xen_enabled()) {
2957 xen_invalidate_map_cache_entry(block->host);
2958 } else {
2959 qemu_vfree(block->host);
2960 }
2961 #endif
2962 }
2963 g_free(block);
2964 return;
2965 }
2966 }
2967
2968 }
2969
2970 #ifndef _WIN32
2971 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2972 {
2973 RAMBlock *block;
2974 ram_addr_t offset;
2975 int flags;
2976 void *area, *vaddr;
2977
2978 QLIST_FOREACH(block, &ram_list.blocks, next) {
2979 offset = addr - block->offset;
2980 if (offset < block->length) {
2981 vaddr = block->host + offset;
2982 if (block->flags & RAM_PREALLOC_MASK) {
2983 ;
2984 } else {
2985 flags = MAP_FIXED;
2986 munmap(vaddr, length);
2987 if (mem_path) {
2988 #if defined(__linux__) && !defined(TARGET_S390X)
2989 if (block->fd) {
2990 #ifdef MAP_POPULATE
2991 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2992 MAP_PRIVATE;
2993 #else
2994 flags |= MAP_PRIVATE;
2995 #endif
2996 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2997 flags, block->fd, offset);
2998 } else {
2999 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3000 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3001 flags, -1, 0);
3002 }
3003 #else
3004 abort();
3005 #endif
3006 } else {
3007 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3008 flags |= MAP_SHARED | MAP_ANONYMOUS;
3009 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3010 flags, -1, 0);
3011 #else
3012 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3013 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3014 flags, -1, 0);
3015 #endif
3016 }
3017 if (area != vaddr) {
3018 fprintf(stderr, "Could not remap addr: "
3019 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3020 length, addr);
3021 exit(1);
3022 }
3023 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3024 }
3025 return;
3026 }
3027 }
3028 }
3029 #endif /* !_WIN32 */
3030
3031 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3032 With the exception of the softmmu code in this file, this should
3033 only be used for local memory (e.g. video ram) that the device owns,
3034 and knows it isn't going to access beyond the end of the block.
3035
3036 It should not be used for general purpose DMA.
3037 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3038 */
3039 void *qemu_get_ram_ptr(ram_addr_t addr)
3040 {
3041 RAMBlock *block;
3042
3043 QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 if (addr - block->offset < block->length) {
3045 /* Move this entry to the start of the list. */
3046 if (block != QLIST_FIRST(&ram_list.blocks)) {
3047 QLIST_REMOVE(block, next);
3048 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3049 }
3050 if (xen_enabled()) {
3051 /* We need to check if the requested address is in the RAM
3052 * because we don't want to map the entire memory in QEMU.
3053 * In that case just map until the end of the page.
3054 */
3055 if (block->offset == 0) {
3056 return xen_map_cache(addr, 0, 0);
3057 } else if (block->host == NULL) {
3058 block->host =
3059 xen_map_cache(block->offset, block->length, 1);
3060 }
3061 }
3062 return block->host + (addr - block->offset);
3063 }
3064 }
3065
3066 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3067 abort();
3068
3069 return NULL;
3070 }
3071
3072 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3073 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3074 */
3075 void *qemu_safe_ram_ptr(ram_addr_t addr)
3076 {
3077 RAMBlock *block;
3078
3079 QLIST_FOREACH(block, &ram_list.blocks, next) {
3080 if (addr - block->offset < block->length) {
3081 if (xen_enabled()) {
3082 /* We need to check if the requested address is in the RAM
3083 * because we don't want to map the entire memory in QEMU.
3084 * In that case just map until the end of the page.
3085 */
3086 if (block->offset == 0) {
3087 return xen_map_cache(addr, 0, 0);
3088 } else if (block->host == NULL) {
3089 block->host =
3090 xen_map_cache(block->offset, block->length, 1);
3091 }
3092 }
3093 return block->host + (addr - block->offset);
3094 }
3095 }
3096
3097 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3098 abort();
3099
3100 return NULL;
3101 }
3102
3103 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3104 * but takes a size argument */
3105 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3106 {
3107 if (*size == 0) {
3108 return NULL;
3109 }
3110 if (xen_enabled()) {
3111 return xen_map_cache(addr, *size, 1);
3112 } else {
3113 RAMBlock *block;
3114
3115 QLIST_FOREACH(block, &ram_list.blocks, next) {
3116 if (addr - block->offset < block->length) {
3117 if (addr - block->offset + *size > block->length)
3118 *size = block->length - addr + block->offset;
3119 return block->host + (addr - block->offset);
3120 }
3121 }
3122
3123 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3124 abort();
3125 }
3126 }
3127
3128 void qemu_put_ram_ptr(void *addr)
3129 {
3130 trace_qemu_put_ram_ptr(addr);
3131 }
3132
3133 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3134 {
3135 RAMBlock *block;
3136 uint8_t *host = ptr;
3137
3138 if (xen_enabled()) {
3139 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3140 return 0;
3141 }
3142
3143 QLIST_FOREACH(block, &ram_list.blocks, next) {
3144 /* This case happens when the block is not mapped. */
3145 if (block->host == NULL) {
3146 continue;
3147 }
3148 if (host - block->host < block->length) {
3149 *ram_addr = block->offset + (host - block->host);
3150 return 0;
3151 }
3152 }
3153
3154 return -1;
3155 }
3156
3157 /* Some of the softmmu routines need to translate from a host pointer
3158 (typically a TLB entry) back to a ram offset. */
3159 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3160 {
3161 ram_addr_t ram_addr;
3162
3163 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3164 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3165 abort();
3166 }
3167 return ram_addr;
3168 }
3169
3170 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3171 unsigned size)
3172 {
3173 #ifdef DEBUG_UNASSIGNED
3174 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3175 #endif
3176 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3177 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3178 #endif
3179 return 0;
3180 }
3181
3182 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3183 uint64_t val, unsigned size)
3184 {
3185 #ifdef DEBUG_UNASSIGNED
3186 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3187 #endif
3188 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3189 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3190 #endif
3191 }
3192
3193 static const MemoryRegionOps unassigned_mem_ops = {
3194 .read = unassigned_mem_read,
3195 .write = unassigned_mem_write,
3196 .endianness = DEVICE_NATIVE_ENDIAN,
3197 };
3198
3199 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3200 unsigned size)
3201 {
3202 abort();
3203 }
3204
3205 static void error_mem_write(void *opaque, target_phys_addr_t addr,
3206 uint64_t value, unsigned size)
3207 {
3208 abort();
3209 }
3210
3211 static const MemoryRegionOps error_mem_ops = {
3212 .read = error_mem_read,
3213 .write = error_mem_write,
3214 .endianness = DEVICE_NATIVE_ENDIAN,
3215 };
3216
3217 static const MemoryRegionOps rom_mem_ops = {
3218 .read = error_mem_read,
3219 .write = unassigned_mem_write,
3220 .endianness = DEVICE_NATIVE_ENDIAN,
3221 };
3222
3223 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3224 uint64_t val, unsigned size)
3225 {
3226 int dirty_flags;
3227 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3228 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3229 #if !defined(CONFIG_USER_ONLY)
3230 tb_invalidate_phys_page_fast(ram_addr, size);
3231 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3232 #endif
3233 }
3234 switch (size) {
3235 case 1:
3236 stb_p(qemu_get_ram_ptr(ram_addr), val);
3237 break;
3238 case 2:
3239 stw_p(qemu_get_ram_ptr(ram_addr), val);
3240 break;
3241 case 4:
3242 stl_p(qemu_get_ram_ptr(ram_addr), val);
3243 break;
3244 default:
3245 abort();
3246 }
3247 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3248 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3249 /* we remove the notdirty callback only if the code has been
3250 flushed */
3251 if (dirty_flags == 0xff)
3252 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3253 }
3254
3255 static const MemoryRegionOps notdirty_mem_ops = {
3256 .read = error_mem_read,
3257 .write = notdirty_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
3259 };
3260
3261 /* Generate a debug exception if a watchpoint has been hit. */
3262 static void check_watchpoint(int offset, int len_mask, int flags)
3263 {
3264 CPUState *env = cpu_single_env;
3265 target_ulong pc, cs_base;
3266 TranslationBlock *tb;
3267 target_ulong vaddr;
3268 CPUWatchpoint *wp;
3269 int cpu_flags;
3270
3271 if (env->watchpoint_hit) {
3272 /* We re-entered the check after replacing the TB. Now raise
3273 * the debug interrupt so that it will trigger after the
3274 * current instruction. */
3275 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3276 return;
3277 }
3278 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3279 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3280 if ((vaddr == (wp->vaddr & len_mask) ||
3281 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3282 wp->flags |= BP_WATCHPOINT_HIT;
3283 if (!env->watchpoint_hit) {
3284 env->watchpoint_hit = wp;
3285 tb = tb_find_pc(env->mem_io_pc);
3286 if (!tb) {
3287 cpu_abort(env, "check_watchpoint: could not find TB for "
3288 "pc=%p", (void *)env->mem_io_pc);
3289 }
3290 cpu_restore_state(tb, env, env->mem_io_pc);
3291 tb_phys_invalidate(tb, -1);
3292 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3293 env->exception_index = EXCP_DEBUG;
3294 } else {
3295 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3296 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3297 }
3298 cpu_resume_from_signal(env, NULL);
3299 }
3300 } else {
3301 wp->flags &= ~BP_WATCHPOINT_HIT;
3302 }
3303 }
3304 }
3305
3306 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3307 so these check for a hit then pass through to the normal out-of-line
3308 phys routines. */
3309 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3310 unsigned size)
3311 {
3312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3313 switch (size) {
3314 case 1: return ldub_phys(addr);
3315 case 2: return lduw_phys(addr);
3316 case 4: return ldl_phys(addr);
3317 default: abort();
3318 }
3319 }
3320
3321 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3322 uint64_t val, unsigned size)
3323 {
3324 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3325 switch (size) {
3326 case 1: stb_phys(addr, val); break;
3327 case 2: stw_phys(addr, val); break;
3328 case 4: stl_phys(addr, val); break;
3329 default: abort();
3330 }
3331 }
3332
3333 static const MemoryRegionOps watch_mem_ops = {
3334 .read = watch_mem_read,
3335 .write = watch_mem_write,
3336 .endianness = DEVICE_NATIVE_ENDIAN,
3337 };
3338
3339 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3340 unsigned len)
3341 {
3342 subpage_t *mmio = opaque;
3343 unsigned int idx = SUBPAGE_IDX(addr);
3344 #if defined(DEBUG_SUBPAGE)
3345 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3346 mmio, len, addr, idx);
3347 #endif
3348
3349 addr += mmio->region_offset[idx];
3350 idx = mmio->sub_io_index[idx];
3351 return io_mem_read(idx, addr, len);
3352 }
3353
3354 static void subpage_write(void *opaque, target_phys_addr_t addr,
3355 uint64_t value, unsigned len)
3356 {
3357 subpage_t *mmio = opaque;
3358 unsigned int idx = SUBPAGE_IDX(addr);
3359 #if defined(DEBUG_SUBPAGE)
3360 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3361 " idx %d value %"PRIx64"\n",
3362 __func__, mmio, len, addr, idx, value);
3363 #endif
3364
3365 addr += mmio->region_offset[idx];
3366 idx = mmio->sub_io_index[idx];
3367 io_mem_write(idx, addr, value, len);
3368 }
3369
3370 static const MemoryRegionOps subpage_ops = {
3371 .read = subpage_read,
3372 .write = subpage_write,
3373 .endianness = DEVICE_NATIVE_ENDIAN,
3374 };
3375
3376 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3377 unsigned size)
3378 {
3379 ram_addr_t raddr = addr;
3380 void *ptr = qemu_get_ram_ptr(raddr);
3381 switch (size) {
3382 case 1: return ldub_p(ptr);
3383 case 2: return lduw_p(ptr);
3384 case 4: return ldl_p(ptr);
3385 default: abort();
3386 }
3387 }
3388
3389 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3390 uint64_t value, unsigned size)
3391 {
3392 ram_addr_t raddr = addr;
3393 void *ptr = qemu_get_ram_ptr(raddr);
3394 switch (size) {
3395 case 1: return stb_p(ptr, value);
3396 case 2: return stw_p(ptr, value);
3397 case 4: return stl_p(ptr, value);
3398 default: abort();
3399 }
3400 }
3401
3402 static const MemoryRegionOps subpage_ram_ops = {
3403 .read = subpage_ram_read,
3404 .write = subpage_ram_write,
3405 .endianness = DEVICE_NATIVE_ENDIAN,
3406 };
3407
3408 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3409 ram_addr_t memory, ram_addr_t region_offset)
3410 {
3411 int idx, eidx;
3412
3413 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3414 return -1;
3415 idx = SUBPAGE_IDX(start);
3416 eidx = SUBPAGE_IDX(end);
3417 #if defined(DEBUG_SUBPAGE)
3418 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3419 mmio, start, end, idx, eidx, memory);
3420 #endif
3421 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
3422 memory = io_mem_subpage_ram.ram_addr;
3423 }
3424 memory &= IO_MEM_NB_ENTRIES - 1;
3425 for (; idx <= eidx; idx++) {
3426 mmio->sub_io_index[idx] = memory;
3427 mmio->region_offset[idx] = region_offset;
3428 }
3429
3430 return 0;
3431 }
3432
3433 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3434 ram_addr_t orig_memory,
3435 ram_addr_t region_offset)
3436 {
3437 subpage_t *mmio;
3438 int subpage_memory;
3439
3440 mmio = g_malloc0(sizeof(subpage_t));
3441
3442 mmio->base = base;
3443 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3444 "subpage", TARGET_PAGE_SIZE);
3445 mmio->iomem.subpage = true;
3446 subpage_memory = mmio->iomem.ram_addr;
3447 #if defined(DEBUG_SUBPAGE)
3448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3450 #endif
3451 *phys = subpage_memory;
3452 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3453
3454 return mmio;
3455 }
3456
3457 static int get_free_io_mem_idx(void)
3458 {
3459 int i;
3460
3461 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3462 if (!io_mem_used[i]) {
3463 io_mem_used[i] = 1;
3464 return i;
3465 }
3466 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3467 return -1;
3468 }
3469
3470 /* Register a MemoryRegion in the I/O dispatch table so that its
3471 read/write callbacks can be looked up by index from the TLB and
3472 the physical page tables.
3473 If io_index is non zero, the corresponding io zone is
3474 modified. If it is zero, a new io zone is allocated. The returned
3475 index can be used with cpu_register_physical_memory(); -1 is
3476 returned on error. */
3477 static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3478 {
3479 if (io_index <= 0) {
3480 io_index = get_free_io_mem_idx();
3481 if (io_index == -1)
3482 return io_index;
3483 } else {
3484 if (io_index >= IO_MEM_NB_ENTRIES)
3485 return -1;
3486 }
3487
3488 io_mem_region[io_index] = mr;
3489
3490 return io_index;
3491 }
3492
3493 int cpu_register_io_memory(MemoryRegion *mr)
3494 {
3495 return cpu_register_io_memory_fixed(0, mr);
3496 }
3497
3498 void cpu_unregister_io_memory(int io_index)
3499 {
3500 io_mem_region[io_index] = NULL;
3501 io_mem_used[io_index] = 0;
3502 }
3503
3504 static void io_mem_init(void)
3505 {
3506 int i;
3507
3508 /* Must be first: */
3509 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3510 assert(io_mem_ram.ram_addr == 0);
3511 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3512 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3513 "unassigned", UINT64_MAX);
3514 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3515 "notdirty", UINT64_MAX);
3516 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3517 "subpage-ram", UINT64_MAX);
3518 for (i=0; i<5; i++)
3519 io_mem_used[i] = 1;
3520
3521 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3522 "watch", UINT64_MAX);
3523 }
3524
3525 static void core_begin(MemoryListener *listener)
3526 {
3527 destroy_all_mappings();
3528 }
3529
3530 static void core_commit(MemoryListener *listener)
3531 {
3532 }
3533
3534 static void core_region_add(MemoryListener *listener,
3535 MemoryRegionSection *section)
3536 {
3537 cpu_register_physical_memory_log(section, section->readonly);
3538 }
3539
3540 static void core_region_del(MemoryListener *listener,
3541 MemoryRegionSection *section)
3542 {
3543 }
3544
3545 static void core_region_nop(MemoryListener *listener,
3546 MemoryRegionSection *section)
3547 {
3548 cpu_register_physical_memory_log(section, section->readonly);
3549 }
3550
3551 static void core_log_start(MemoryListener *listener,
3552 MemoryRegionSection *section)
3553 {
3554 }
3555
3556 static void core_log_stop(MemoryListener *listener,
3557 MemoryRegionSection *section)
3558 {
3559 }
3560
3561 static void core_log_sync(MemoryListener *listener,
3562 MemoryRegionSection *section)
3563 {
3564 }
3565
3566 static void core_log_global_start(MemoryListener *listener)
3567 {
3568 cpu_physical_memory_set_dirty_tracking(1);
3569 }
3570
3571 static void core_log_global_stop(MemoryListener *listener)
3572 {
3573 cpu_physical_memory_set_dirty_tracking(0);
3574 }
3575
3576 static void core_eventfd_add(MemoryListener *listener,
3577 MemoryRegionSection *section,
3578 bool match_data, uint64_t data, int fd)
3579 {
3580 }
3581
3582 static void core_eventfd_del(MemoryListener *listener,
3583 MemoryRegionSection *section,
3584 bool match_data, uint64_t data, int fd)
3585 {
3586 }
3587
3588 static void io_begin(MemoryListener *listener)
3589 {
3590 }
3591
3592 static void io_commit(MemoryListener *listener)
3593 {
3594 }
3595
3596 static void io_region_add(MemoryListener *listener,
3597 MemoryRegionSection *section)
3598 {
3599 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3600 section->offset_within_address_space, section->size);
3601 ioport_register(&section->mr->iorange);
3602 }
3603
3604 static void io_region_del(MemoryListener *listener,
3605 MemoryRegionSection *section)
3606 {
3607 isa_unassign_ioport(section->offset_within_address_space, section->size);
3608 }
3609
3610 static void io_region_nop(MemoryListener *listener,
3611 MemoryRegionSection *section)
3612 {
3613 }
3614
3615 static void io_log_start(MemoryListener *listener,
3616 MemoryRegionSection *section)
3617 {
3618 }
3619
3620 static void io_log_stop(MemoryListener *listener,
3621 MemoryRegionSection *section)
3622 {
3623 }
3624
3625 static void io_log_sync(MemoryListener *listener,
3626 MemoryRegionSection *section)
3627 {
3628 }
3629
3630 static void io_log_global_start(MemoryListener *listener)
3631 {
3632 }
3633
3634 static void io_log_global_stop(MemoryListener *listener)
3635 {
3636 }
3637
3638 static void io_eventfd_add(MemoryListener *listener,
3639 MemoryRegionSection *section,
3640 bool match_data, uint64_t data, int fd)
3641 {
3642 }
3643
3644 static void io_eventfd_del(MemoryListener *listener,
3645 MemoryRegionSection *section,
3646 bool match_data, uint64_t data, int fd)
3647 {
3648 }
3649
3650 static MemoryListener core_memory_listener = {
3651 .begin = core_begin,
3652 .commit = core_commit,
3653 .region_add = core_region_add,
3654 .region_del = core_region_del,
3655 .region_nop = core_region_nop,
3656 .log_start = core_log_start,
3657 .log_stop = core_log_stop,
3658 .log_sync = core_log_sync,
3659 .log_global_start = core_log_global_start,
3660 .log_global_stop = core_log_global_stop,
3661 .eventfd_add = core_eventfd_add,
3662 .eventfd_del = core_eventfd_del,
3663 .priority = 0,
3664 };
3665
3666 static MemoryListener io_memory_listener = {
3667 .begin = io_begin,
3668 .commit = io_commit,
3669 .region_add = io_region_add,
3670 .region_del = io_region_del,
3671 .region_nop = io_region_nop,
3672 .log_start = io_log_start,
3673 .log_stop = io_log_stop,
3674 .log_sync = io_log_sync,
3675 .log_global_start = io_log_global_start,
3676 .log_global_stop = io_log_global_stop,
3677 .eventfd_add = io_eventfd_add,
3678 .eventfd_del = io_eventfd_del,
3679 .priority = 0,
3680 };
3681
3682 static void memory_map_init(void)
3683 {
3684 system_memory = g_malloc(sizeof(*system_memory));
3685 memory_region_init(system_memory, "system", INT64_MAX);
3686 set_system_memory_map(system_memory);
3687
3688 system_io = g_malloc(sizeof(*system_io));
3689 memory_region_init(system_io, "io", 65536);
3690 set_system_io_map(system_io);
3691
3692 memory_listener_register(&core_memory_listener, system_memory);
3693 memory_listener_register(&io_memory_listener, system_io);
3694 }
3695
3696 MemoryRegion *get_system_memory(void)
3697 {
3698 return system_memory;
3699 }
3700
3701 MemoryRegion *get_system_io(void)
3702 {
3703 return system_io;
3704 }
3705
3706 #endif /* !defined(CONFIG_USER_ONLY) */
3707
3708 /* physical memory access (slow version, mainly for debug) */
3709 #if defined(CONFIG_USER_ONLY)
3710 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3711 uint8_t *buf, int len, int is_write)
3712 {
3713 int l, flags;
3714 target_ulong page;
3715 void * p;
3716
3717 while (len > 0) {
3718 page = addr & TARGET_PAGE_MASK;
3719 l = (page + TARGET_PAGE_SIZE) - addr;
3720 if (l > len)
3721 l = len;
3722 flags = page_get_flags(page);
3723 if (!(flags & PAGE_VALID))
3724 return -1;
3725 if (is_write) {
3726 if (!(flags & PAGE_WRITE))
3727 return -1;
3728 /* XXX: this code should not depend on lock_user */
3729 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3730 return -1;
3731 memcpy(p, buf, l);
3732 unlock_user(p, addr, l);
3733 } else {
3734 if (!(flags & PAGE_READ))
3735 return -1;
3736 /* XXX: this code should not depend on lock_user */
3737 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3738 return -1;
3739 memcpy(buf, p, l);
3740 unlock_user(p, addr, 0);
3741 }
3742 len -= l;
3743 buf += l;
3744 addr += l;
3745 }
3746 return 0;
3747 }
3748
3749 #else
3750 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3751 int len, int is_write)
3752 {
3753 int l, io_index;
3754 uint8_t *ptr;
3755 uint32_t val;
3756 target_phys_addr_t page;
3757 ram_addr_t pd;
3758 PhysPageDesc p;
3759
3760 while (len > 0) {
3761 page = addr & TARGET_PAGE_MASK;
3762 l = (page + TARGET_PAGE_SIZE) - addr;
3763 if (l > len)
3764 l = len;
3765 p = phys_page_find(page >> TARGET_PAGE_BITS);
3766 pd = p.phys_offset;
3767
3768 if (is_write) {
3769 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3770 target_phys_addr_t addr1;
3771 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3772 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3773 /* XXX: could force cpu_single_env to NULL to avoid
3774 potential bugs */
3775 if (l >= 4 && ((addr1 & 3) == 0)) {
3776 /* 32 bit write access */
3777 val = ldl_p(buf);
3778 io_mem_write(io_index, addr1, val, 4);
3779 l = 4;
3780 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3781 /* 16 bit write access */
3782 val = lduw_p(buf);
3783 io_mem_write(io_index, addr1, val, 2);
3784 l = 2;
3785 } else {
3786 /* 8 bit write access */
3787 val = ldub_p(buf);
3788 io_mem_write(io_index, addr1, val, 1);
3789 l = 1;
3790 }
3791 } else {
3792 ram_addr_t addr1;
3793 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3794 /* RAM case */
3795 ptr = qemu_get_ram_ptr(addr1);
3796 memcpy(ptr, buf, l);
3797 if (!cpu_physical_memory_is_dirty(addr1)) {
3798 /* invalidate code */
3799 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3800 /* set dirty bit */
3801 cpu_physical_memory_set_dirty_flags(
3802 addr1, (0xff & ~CODE_DIRTY_FLAG));
3803 }
3804 qemu_put_ram_ptr(ptr);
3805 }
3806 } else {
3807 if (!is_ram_rom_romd(pd)) {
3808 target_phys_addr_t addr1;
3809 /* I/O case */
3810 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
3811 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3812 if (l >= 4 && ((addr1 & 3) == 0)) {
3813 /* 32 bit read access */
3814 val = io_mem_read(io_index, addr1, 4);
3815 stl_p(buf, val);
3816 l = 4;
3817 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3818 /* 16 bit read access */
3819 val = io_mem_read(io_index, addr1, 2);
3820 stw_p(buf, val);
3821 l = 2;
3822 } else {
3823 /* 8 bit read access */
3824 val = io_mem_read(io_index, addr1, 1);
3825 stb_p(buf, val);
3826 l = 1;
3827 }
3828 } else {
3829 /* RAM case */
3830 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3831 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3832 qemu_put_ram_ptr(ptr);
3833 }
3834 }
3835 len -= l;
3836 buf += l;
3837 addr += l;
3838 }
3839 }
3840
3841 /* used for ROM loading : can write in RAM and ROM */
3842 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3843 const uint8_t *buf, int len)
3844 {
3845 int l;
3846 uint8_t *ptr;
3847 target_phys_addr_t page;
3848 unsigned long pd;
3849 PhysPageDesc p;
3850
3851 while (len > 0) {
3852 page = addr & TARGET_PAGE_MASK;
3853 l = (page + TARGET_PAGE_SIZE) - addr;
3854 if (l > len)
3855 l = len;
3856 p = phys_page_find(page >> TARGET_PAGE_BITS);
3857 pd = p.phys_offset;
3858
3859 if (!is_ram_rom_romd(pd)) {
3860 /* do nothing */
3861 } else {
3862 unsigned long addr1;
3863 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3864 /* ROM/RAM case */
3865 ptr = qemu_get_ram_ptr(addr1);
3866 memcpy(ptr, buf, l);
3867 qemu_put_ram_ptr(ptr);
3868 }
3869 len -= l;
3870 buf += l;
3871 addr += l;
3872 }
3873 }
3874
3875 typedef struct {
3876 void *buffer;
3877 target_phys_addr_t addr;
3878 target_phys_addr_t len;
3879 } BounceBuffer;
3880
3881 static BounceBuffer bounce;
3882
3883 typedef struct MapClient {
3884 void *opaque;
3885 void (*callback)(void *opaque);
3886 QLIST_ENTRY(MapClient) link;
3887 } MapClient;
3888
3889 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3890 = QLIST_HEAD_INITIALIZER(map_client_list);
3891
3892 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3893 {
3894 MapClient *client = g_malloc(sizeof(*client));
3895
3896 client->opaque = opaque;
3897 client->callback = callback;
3898 QLIST_INSERT_HEAD(&map_client_list, client, link);
3899 return client;
3900 }
3901
3902 void cpu_unregister_map_client(void *_client)
3903 {
3904 MapClient *client = (MapClient *)_client;
3905
3906 QLIST_REMOVE(client, link);
3907 g_free(client);
3908 }
3909
3910 static void cpu_notify_map_clients(void)
3911 {
3912 MapClient *client;
3913
3914 while (!QLIST_EMPTY(&map_client_list)) {
3915 client = QLIST_FIRST(&map_client_list);
3916 client->callback(client->opaque);
3917 cpu_unregister_map_client(client);
3918 }
3919 }
3920
3921 /* Map a physical memory region into a host virtual address.
3922 * May map a subset of the requested range, given by and returned in *plen.
3923 * May return NULL if resources needed to perform the mapping are exhausted.
3924 * Use only for reads OR writes - not for read-modify-write operations.
3925 * Use cpu_register_map_client() to know when retrying the map operation is
3926 * likely to succeed.
3927 */
3928 void *cpu_physical_memory_map(target_phys_addr_t addr,
3929 target_phys_addr_t *plen,
3930 int is_write)
3931 {
3932 target_phys_addr_t len = *plen;
3933 target_phys_addr_t todo = 0;
3934 int l;
3935 target_phys_addr_t page;
3936 unsigned long pd;
3937 PhysPageDesc p;
3938 ram_addr_t raddr = RAM_ADDR_MAX;
3939 ram_addr_t rlen;
3940 void *ret;
3941
3942 while (len > 0) {
3943 page = addr & TARGET_PAGE_MASK;
3944 l = (page + TARGET_PAGE_SIZE) - addr;
3945 if (l > len)
3946 l = len;
3947 p = phys_page_find(page >> TARGET_PAGE_BITS);
3948 pd = p.phys_offset;
3949
3950 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3951 if (todo || bounce.buffer) {
3952 break;
3953 }
3954 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3955 bounce.addr = addr;
3956 bounce.len = l;
3957 if (!is_write) {
3958 cpu_physical_memory_read(addr, bounce.buffer, l);
3959 }
3960
3961 *plen = l;
3962 return bounce.buffer;
3963 }
3964 if (!todo) {
3965 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3966 }
3967
3968 len -= l;
3969 addr += l;
3970 todo += l;
3971 }
3972 rlen = todo;
3973 ret = qemu_ram_ptr_length(raddr, &rlen);
3974 *plen = rlen;
3975 return ret;
3976 }
3977
3978 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3979 * Will also mark the memory as dirty if is_write == 1. access_len gives
3980 * the amount of memory that was actually read or written by the caller.
3981 */
3982 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3983 int is_write, target_phys_addr_t access_len)
3984 {
3985 if (buffer != bounce.buffer) {
3986 if (is_write) {
3987 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3988 while (access_len) {
3989 unsigned l;
3990 l = TARGET_PAGE_SIZE;
3991 if (l > access_len)
3992 l = access_len;
3993 if (!cpu_physical_memory_is_dirty(addr1)) {
3994 /* invalidate code */
3995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3996 /* set dirty bit */
3997 cpu_physical_memory_set_dirty_flags(
3998 addr1, (0xff & ~CODE_DIRTY_FLAG));
3999 }
4000 addr1 += l;
4001 access_len -= l;
4002 }
4003 }
4004 if (xen_enabled()) {
4005 xen_invalidate_map_cache_entry(buffer);
4006 }
4007 return;
4008 }
4009 if (is_write) {
4010 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4011 }
4012 qemu_vfree(bounce.buffer);
4013 bounce.buffer = NULL;
4014 cpu_notify_map_clients();
4015 }
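/* Illustrative sketch, not part of the original file: the intended
 * map/copy/unmap pairing for a read-only transfer, honouring the fact that
 * *plen may come back smaller than requested.  example_copy_from_guest() is
 * a hypothetical name and the block is excluded from compilation.
 */
#if 0
static void example_copy_from_guest(target_phys_addr_t src, uint8_t *dst,
                                    target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(src, &plen, 0 /* read */);

        if (!host) {
            break;              /* resources exhausted; see MapClient above */
        }
        memcpy(dst, host, plen);
        /* access_len == plen: everything that was mapped was actually read */
        cpu_physical_memory_unmap(host, plen, 0, plen);
        src += plen;
        dst += plen;
        size -= plen;
    }
}
#endif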
4016
4017 /* warning: addr must be aligned */
4018 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4019 enum device_endian endian)
4020 {
4021 int io_index;
4022 uint8_t *ptr;
4023 uint32_t val;
4024 unsigned long pd;
4025 PhysPageDesc p;
4026
4027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4028 pd = p.phys_offset;
4029
4030 if (!is_ram_rom_romd(pd)) {
4031 /* I/O case */
4032 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4033 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4034 val = io_mem_read(io_index, addr, 4);
4035 #if defined(TARGET_WORDS_BIGENDIAN)
4036 if (endian == DEVICE_LITTLE_ENDIAN) {
4037 val = bswap32(val);
4038 }
4039 #else
4040 if (endian == DEVICE_BIG_ENDIAN) {
4041 val = bswap32(val);
4042 }
4043 #endif
4044 } else {
4045 /* RAM case */
4046 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4047 (addr & ~TARGET_PAGE_MASK);
4048 switch (endian) {
4049 case DEVICE_LITTLE_ENDIAN:
4050 val = ldl_le_p(ptr);
4051 break;
4052 case DEVICE_BIG_ENDIAN:
4053 val = ldl_be_p(ptr);
4054 break;
4055 default:
4056 val = ldl_p(ptr);
4057 break;
4058 }
4059 }
4060 return val;
4061 }
4062
4063 uint32_t ldl_phys(target_phys_addr_t addr)
4064 {
4065 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4066 }
4067
4068 uint32_t ldl_le_phys(target_phys_addr_t addr)
4069 {
4070 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4071 }
4072
4073 uint32_t ldl_be_phys(target_phys_addr_t addr)
4074 {
4075 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4076 }
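/* Illustrative sketch, not part of the original file: ldl_le_phys() and
 * ldl_be_phys() read guest memory with a fixed byte order regardless of
 * TARGET_WORDS_BIGENDIAN, which is what device models with an architected
 * in-memory layout want; ldl_phys() follows the target's own byte order.
 * The descriptor offset below is hypothetical and the block is excluded
 * from compilation.
 */
#if 0
static uint32_t example_read_desc_len(target_phys_addr_t desc)
{
    /* A little-endian "length" field at offset 4 of a DMA descriptor:
       yields the same value on big- and little-endian targets. */
    return ldl_le_phys(desc + 4);
}
#endif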
4077
4078 /* warning: addr must be aligned */
4079 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4080 enum device_endian endian)
4081 {
4082 int io_index;
4083 uint8_t *ptr;
4084 uint64_t val;
4085 unsigned long pd;
4086 PhysPageDesc p;
4087
4088 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4089 pd = p.phys_offset;
4090
4091 if (!is_ram_rom_romd(pd)) {
4092 /* I/O case */
4093 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4094 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4095
4096 /* XXX This is broken when device endian != cpu endian.
4097 Fix this and honour the "endian" argument. */
4098 #ifdef TARGET_WORDS_BIGENDIAN
4099 val = io_mem_read(io_index, addr, 4) << 32;
4100 val |= io_mem_read(io_index, addr + 4, 4);
4101 #else
4102 val = io_mem_read(io_index, addr, 4);
4103 val |= io_mem_read(io_index, addr + 4, 4) << 32;
4104 #endif
4105 } else {
4106 /* RAM case */
4107 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4108 (addr & ~TARGET_PAGE_MASK);
4109 switch (endian) {
4110 case DEVICE_LITTLE_ENDIAN:
4111 val = ldq_le_p(ptr);
4112 break;
4113 case DEVICE_BIG_ENDIAN:
4114 val = ldq_be_p(ptr);
4115 break;
4116 default:
4117 val = ldq_p(ptr);
4118 break;
4119 }
4120 }
4121 return val;
4122 }
4123
4124 uint64_t ldq_phys(target_phys_addr_t addr)
4125 {
4126 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4127 }
4128
4129 uint64_t ldq_le_phys(target_phys_addr_t addr)
4130 {
4131 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4132 }
4133
4134 uint64_t ldq_be_phys(target_phys_addr_t addr)
4135 {
4136 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4137 }
4138
4139 /* XXX: optimize */
4140 uint32_t ldub_phys(target_phys_addr_t addr)
4141 {
4142 uint8_t val;
4143 cpu_physical_memory_read(addr, &val, 1);
4144 return val;
4145 }
4146
4147 /* warning: addr must be aligned */
4148 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4149 enum device_endian endian)
4150 {
4151 int io_index;
4152 uint8_t *ptr;
4153 uint64_t val;
4154 unsigned long pd;
4155 PhysPageDesc p;
4156
4157 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4158 pd = p.phys_offset;
4159
4160 if (!is_ram_rom_romd(pd)) {
4161 /* I/O case */
4162 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4163 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4164 val = io_mem_read(io_index, addr, 2);
4165 #if defined(TARGET_WORDS_BIGENDIAN)
4166 if (endian == DEVICE_LITTLE_ENDIAN) {
4167 val = bswap16(val);
4168 }
4169 #else
4170 if (endian == DEVICE_BIG_ENDIAN) {
4171 val = bswap16(val);
4172 }
4173 #endif
4174 } else {
4175 /* RAM case */
4176 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4177 (addr & ~TARGET_PAGE_MASK);
4178 switch (endian) {
4179 case DEVICE_LITTLE_ENDIAN:
4180 val = lduw_le_p(ptr);
4181 break;
4182 case DEVICE_BIG_ENDIAN:
4183 val = lduw_be_p(ptr);
4184 break;
4185 default:
4186 val = lduw_p(ptr);
4187 break;
4188 }
4189 }
4190 return val;
4191 }
4192
4193 uint32_t lduw_phys(target_phys_addr_t addr)
4194 {
4195 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4196 }
4197
4198 uint32_t lduw_le_phys(target_phys_addr_t addr)
4199 {
4200 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4201 }
4202
4203 uint32_t lduw_be_phys(target_phys_addr_t addr)
4204 {
4205 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4206 }
4207
4208 /* warning: addr must be aligned. The RAM page is not marked as dirty
4209 and the code inside it is not invalidated. This is useful when the
4210 dirty bits are used to track modified PTEs. */
4211 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4212 {
4213 int io_index;
4214 uint8_t *ptr;
4215 unsigned long pd;
4216 PhysPageDesc p;
4217
4218 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4219 pd = p.phys_offset;
4220
4221 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4222 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4223 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4224 io_mem_write(io_index, addr, val, 4);
4225 } else {
4226 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4227 ptr = qemu_get_ram_ptr(addr1);
4228 stl_p(ptr, val);
4229
4230 if (unlikely(in_migration)) {
4231 if (!cpu_physical_memory_is_dirty(addr1)) {
4232 /* invalidate code */
4233 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4234 /* set dirty bit */
4235 cpu_physical_memory_set_dirty_flags(
4236 addr1, (0xff & ~CODE_DIRTY_FLAG));
4237 }
4238 }
4239 }
4240 }
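/* Illustrative sketch, not part of the original file: the typical caller of
 * stl_phys_notdirty() is a target's page-table walker updating bookkeeping
 * bits in a guest PTE; skipping the dirty-bitmap/TB-invalidation path avoids
 * treating that write as self-modifying code.  The helper below is
 * hypothetical (the 0x20 Accessed bit follows the x86 PTE layout) and the
 * block is excluded from compilation.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {            /* Accessed bit not yet set */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif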
4241
4242 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4243 {
4244 int io_index;
4245 uint8_t *ptr;
4246 unsigned long pd;
4247 PhysPageDesc p;
4248
4249 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4250 pd = p.phys_offset;
4251
4252 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4253 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4254 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4255 #ifdef TARGET_WORDS_BIGENDIAN
4256 io_mem_write(io_index, addr, val >> 32, 4);
4257 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4258 #else
4259 io_mem_write(io_index, addr, (uint32_t)val, 4);
4260 io_mem_write(io_index, addr + 4, val >> 32, 4);
4261 #endif
4262 } else {
4263 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4264 (addr & ~TARGET_PAGE_MASK);
4265 stq_p(ptr, val);
4266 }
4267 }
4268
4269 /* warning: addr must be aligned */
4270 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4271 enum device_endian endian)
4272 {
4273 int io_index;
4274 uint8_t *ptr;
4275 unsigned long pd;
4276 PhysPageDesc p;
4277
4278 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4279 pd = p.phys_offset;
4280
4281 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4282 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4283 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4284 #if defined(TARGET_WORDS_BIGENDIAN)
4285 if (endian == DEVICE_LITTLE_ENDIAN) {
4286 val = bswap32(val);
4287 }
4288 #else
4289 if (endian == DEVICE_BIG_ENDIAN) {
4290 val = bswap32(val);
4291 }
4292 #endif
4293 io_mem_write(io_index, addr, val, 4);
4294 } else {
4295 unsigned long addr1;
4296 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4297 /* RAM case */
4298 ptr = qemu_get_ram_ptr(addr1);
4299 switch (endian) {
4300 case DEVICE_LITTLE_ENDIAN:
4301 stl_le_p(ptr, val);
4302 break;
4303 case DEVICE_BIG_ENDIAN:
4304 stl_be_p(ptr, val);
4305 break;
4306 default:
4307 stl_p(ptr, val);
4308 break;
4309 }
4310 if (!cpu_physical_memory_is_dirty(addr1)) {
4311 /* invalidate code */
4312 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4313 /* set dirty bit */
4314 cpu_physical_memory_set_dirty_flags(addr1,
4315 (0xff & ~CODE_DIRTY_FLAG));
4316 }
4317 }
4318 }
4319
4320 void stl_phys(target_phys_addr_t addr, uint32_t val)
4321 {
4322 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4323 }
4324
4325 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4326 {
4327 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4328 }
4329
4330 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4331 {
4332 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4333 }
4334
4335 /* XXX: optimize */
4336 void stb_phys(target_phys_addr_t addr, uint32_t val)
4337 {
4338 uint8_t v = val;
4339 cpu_physical_memory_write(addr, &v, 1);
4340 }
4341
4342 /* warning: addr must be aligned */
4343 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4344 enum device_endian endian)
4345 {
4346 int io_index;
4347 uint8_t *ptr;
4348 unsigned long pd;
4349 PhysPageDesc p;
4350
4351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4352 pd = p.phys_offset;
4353
4354 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4355 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
4356 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4357 #if defined(TARGET_WORDS_BIGENDIAN)
4358 if (endian == DEVICE_LITTLE_ENDIAN) {
4359 val = bswap16(val);
4360 }
4361 #else
4362 if (endian == DEVICE_BIG_ENDIAN) {
4363 val = bswap16(val);
4364 }
4365 #endif
4366 io_mem_write(io_index, addr, val, 2);
4367 } else {
4368 unsigned long addr1;
4369 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4370 /* RAM case */
4371 ptr = qemu_get_ram_ptr(addr1);
4372 switch (endian) {
4373 case DEVICE_LITTLE_ENDIAN:
4374 stw_le_p(ptr, val);
4375 break;
4376 case DEVICE_BIG_ENDIAN:
4377 stw_be_p(ptr, val);
4378 break;
4379 default:
4380 stw_p(ptr, val);
4381 break;
4382 }
4383 if (!cpu_physical_memory_is_dirty(addr1)) {
4384 /* invalidate code */
4385 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4386 /* set dirty bit */
4387 cpu_physical_memory_set_dirty_flags(addr1,
4388 (0xff & ~CODE_DIRTY_FLAG));
4389 }
4390 }
4391 }
4392
4393 void stw_phys(target_phys_addr_t addr, uint32_t val)
4394 {
4395 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4396 }
4397
4398 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4399 {
4400 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4401 }
4402
4403 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4404 {
4405 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4406 }
4407
4408 /* XXX: optimize */
4409 void stq_phys(target_phys_addr_t addr, uint64_t val)
4410 {
4411 val = tswap64(val);
4412 cpu_physical_memory_write(addr, &val, 8);
4413 }
4414
4415 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4416 {
4417 val = cpu_to_le64(val);
4418 cpu_physical_memory_write(addr, &val, 8);
4419 }
4420
4421 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4422 {
4423 val = cpu_to_be64(val);
4424 cpu_physical_memory_write(addr, &val, 8);
4425 }
4426
4427 /* virtual memory access for debug (includes writing to ROM) */
4428 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4429 uint8_t *buf, int len, int is_write)
4430 {
4431 int l;
4432 target_phys_addr_t phys_addr;
4433 target_ulong page;
4434
4435 while (len > 0) {
4436 page = addr & TARGET_PAGE_MASK;
4437 phys_addr = cpu_get_phys_page_debug(env, page);
4438 /* if no physical page mapped, return an error */
4439 if (phys_addr == -1)
4440 return -1;
4441 l = (page + TARGET_PAGE_SIZE) - addr;
4442 if (l > len)
4443 l = len;
4444 phys_addr += (addr & ~TARGET_PAGE_MASK);
4445 if (is_write)
4446 cpu_physical_memory_write_rom(phys_addr, buf, l);
4447 else
4448 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4449 len -= l;
4450 buf += l;
4451 addr += l;
4452 }
4453 return 0;
4454 }
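/* Illustrative sketch, not part of the original file: a gdbstub-style helper
 * that reads one 32-bit word from a guest *virtual* address, tolerating
 * unmapped pages.  example_read_guest_u32() is a hypothetical name and the
 * block is excluded from compilation.
 */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;                  /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);              /* buf holds guest-byte-order data */
    return 0;
}
#endif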
4455 #endif
4456
4457 /* in deterministic execution mode, instructions that perform device I/O
4458 must be at the end of the TB */
4459 void cpu_io_recompile(CPUState *env, void *retaddr)
4460 {
4461 TranslationBlock *tb;
4462 uint32_t n, cflags;
4463 target_ulong pc, cs_base;
4464 uint64_t flags;
4465
4466 tb = tb_find_pc((unsigned long)retaddr);
4467 if (!tb) {
4468 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4469 retaddr);
4470 }
4471 n = env->icount_decr.u16.low + tb->icount;
4472 cpu_restore_state(tb, env, (unsigned long)retaddr);
4473 /* Calculate how many instructions had been executed before the fault
4474 occurred. */
4475 n = n - env->icount_decr.u16.low;
4476 /* Generate a new TB ending on the I/O insn. */
4477 n++;
4478 /* On MIPS and SH, delay slot instructions can only be restarted if
4479 they were already the first instruction in the TB. If this is not
4480 the first instruction in a TB then re-execute the preceding
4481 branch. */
4482 #if defined(TARGET_MIPS)
4483 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4484 env->active_tc.PC -= 4;
4485 env->icount_decr.u16.low++;
4486 env->hflags &= ~MIPS_HFLAG_BMASK;
4487 }
4488 #elif defined(TARGET_SH4)
4489 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4490 && n > 1) {
4491 env->pc -= 2;
4492 env->icount_decr.u16.low++;
4493 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4494 }
4495 #endif
4496 /* This should never happen. */
4497 if (n > CF_COUNT_MASK)
4498 cpu_abort(env, "TB too big during recompile");
4499
4500 cflags = n | CF_LAST_IO;
4501 pc = tb->pc;
4502 cs_base = tb->cs_base;
4503 flags = tb->flags;
4504 tb_phys_invalidate(tb, -1);
4505 /* FIXME: In theory this could raise an exception. In practice
4506 we have already translated the block once so it's probably ok. */
4507 tb_gen_code(env, pc, cs_base, flags, cflags);
4508 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4509 the first in the TB) then we end up generating a whole new TB and
4510 repeating the fault, which is horribly inefficient.
4511 Better would be to execute just this insn uncached, or generate a
4512 second new TB. */
4513 cpu_resume_from_signal(env, NULL);
4514 }
4515
4516 #if !defined(CONFIG_USER_ONLY)
4517
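/* For reference: in this tree the statistics below are what the monitor's
   "info jit" command prints. */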
4518 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4519 {
4520 int i, target_code_size, max_target_code_size;
4521 int direct_jmp_count, direct_jmp2_count, cross_page;
4522 TranslationBlock *tb;
4523
4524 target_code_size = 0;
4525 max_target_code_size = 0;
4526 cross_page = 0;
4527 direct_jmp_count = 0;
4528 direct_jmp2_count = 0;
4529 for(i = 0; i < nb_tbs; i++) {
4530 tb = &tbs[i];
4531 target_code_size += tb->size;
4532 if (tb->size > max_target_code_size)
4533 max_target_code_size = tb->size;
4534 if (tb->page_addr[1] != -1)
4535 cross_page++;
4536 if (tb->tb_next_offset[0] != 0xffff) {
4537 direct_jmp_count++;
4538 if (tb->tb_next_offset[1] != 0xffff) {
4539 direct_jmp2_count++;
4540 }
4541 }
4542 }
4543 /* XXX: avoid using doubles? */
4544 cpu_fprintf(f, "Translation buffer state:\n");
4545 cpu_fprintf(f, "gen code size %td/%ld\n",
4546 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4547 cpu_fprintf(f, "TB count %d/%d\n",
4548 nb_tbs, code_gen_max_blocks);
4549 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4550 nb_tbs ? target_code_size / nb_tbs : 0,
4551 max_target_code_size);
4552 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4553 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4554 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4555 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4556 cross_page,
4557 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4558 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4559 direct_jmp_count,
4560 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4561 direct_jmp2_count,
4562 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4563 cpu_fprintf(f, "\nStatistics:\n");
4564 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4565 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4566 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4567 tcg_dump_info(f, cpu_fprintf);
4568 }
4569
4570 /* NOTE: this function can trigger an exception */
4571 /* NOTE2: the returned address is not a guest physical address: it is a
4572 ram_addr_t, i.e. an offset into the guest RAM blocks */
4573 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4574 {
4575 int mmu_idx, page_index, pd;
4576 void *p;
4577
4578 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4579 mmu_idx = cpu_mmu_index(env1);
4580 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4581 (addr & TARGET_PAGE_MASK))) {
4582 ldub_code(addr);
4583 }
4584 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4585 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4586 && !is_romd(pd)) {
4587 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4588 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4589 #else
4590 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4591 #endif
4592 }
4593 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4594 return qemu_ram_addr_from_host_nofail(p);
4595 }
4596
4597 /*
4598 * A helper function for the _utterly broken_ virtio device model to find out if
4599 * it's running on a big endian machine. Don't do this at home kids!
4600 */
4601 bool virtio_is_big_endian(void);
4602 bool virtio_is_big_endian(void)
4603 {
4604 #if defined(TARGET_WORDS_BIGENDIAN)
4605 return true;
4606 #else
4607 return false;
4608 #endif
4609 }
4610
4611 #define MMUSUFFIX _cmmu
4612 #undef GETPC
4613 #define GETPC() NULL
4614 #define env cpu_single_env
4615 #define SOFTMMU_CODE_ACCESS
4616
4617 #define SHIFT 0
4618 #include "softmmu_template.h"
4619
4620 #define SHIFT 1
4621 #include "softmmu_template.h"
4622
4623 #define SHIFT 2
4624 #include "softmmu_template.h"
4625
4626 #define SHIFT 3
4627 #include "softmmu_template.h"
4628
4629 #undef env
4630
4631 #endif