1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
59
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
62
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
67
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
71
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
74
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
79
80 #define SMC_BITMAP_USE_THRESHOLD 10
81
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92    section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
115
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
120
121 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 static MemoryRegion io_mem_subpage_ram;
123
124 #endif
125
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 DEFINE_TLS(CPUState *,cpu_single_env);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138 /* in order to optimize self modifying code, we count the number
139 of lookups we do to a given page to use a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144 #endif
145 } PageDesc;
146
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 #else
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
154 #endif
155 #else
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
157 #endif
158
159 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_BITS 10
161 #define L2_SIZE (1 << L2_BITS)
162
163 #define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
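
/* Illustrative arithmetic (the actual values are target-dependent): with
   TARGET_PHYS_ADDR_SPACE_BITS == 36 and TARGET_PAGE_BITS == 12 there are
   24 page-index bits to cover, so P_L2_LEVELS == ((36 - 12 - 1) / 10) + 1
   == 3, i.e. three 10-bit levels (30 bits >= 24). */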
165
166 /* The bits remaining after N lower levels of page tables. */
167 #define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169
170 #if V_L1_BITS_REM < 4
171 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172 #else
173 #define V_L1_BITS V_L1_BITS_REM
174 #endif
175
176 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177
178 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
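
/* Worked decomposition (illustrative; assumes L1_MAP_ADDR_SPACE_BITS == 36
   and TARGET_PAGE_BITS == 12): 24 page-index bits remain, 24 % 10 == 4, so
   V_L1_BITS == 4, V_L1_SIZE == 16 and V_L1_SHIFT == 20.  A page index is
   then split as

       l1 = (index >> 20) & 15;     (4-bit index into l1_map)
       l2 = (index >> 10) & 1023;   (10-bit index into an L2 pointer table)
       l3 = index & 1023;           (10-bit index into a PageDesc array)

   which is exactly the walk performed by page_find_alloc() below. */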
179
180 unsigned long qemu_real_host_page_size;
181 unsigned long qemu_host_page_size;
182 unsigned long qemu_host_page_mask;
183
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map[V_L1_SIZE];
187
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageEntry PhysPageEntry;
190
191 static MemoryRegionSection *phys_sections;
192 static unsigned phys_sections_nb, phys_sections_nb_alloc;
193 static uint16_t phys_section_unassigned;
194
195 struct PhysPageEntry {
196 union {
197 uint16_t leaf; /* index into phys_sections */
198 uint16_t node; /* index into phys_map_nodes */
199 } u;
200 };
201
202 /* Simple allocator for PhysPageEntry nodes */
203 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
204 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
205
206 #define PHYS_MAP_NODE_NIL ((uint16_t)~0)
207
208 /* This is a multi-level map on the physical address space.
209 The bottom level has pointers to MemoryRegionSections. */
210 static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };
211
212 static void io_mem_init(void);
213 static void memory_map_init(void);
214
215 /* io memory support */
216 MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
217 static char io_mem_used[IO_MEM_NB_ENTRIES];
218 static MemoryRegion io_mem_watch;
219 #endif
220
221 /* log support */
222 #ifdef WIN32
223 static const char *logfilename = "qemu.log";
224 #else
225 static const char *logfilename = "/tmp/qemu.log";
226 #endif
227 FILE *logfile;
228 int loglevel;
229 static int log_append = 0;
230
231 /* statistics */
232 #if !defined(CONFIG_USER_ONLY)
233 static int tlb_flush_count;
234 #endif
235 static int tb_flush_count;
236 static int tb_phys_invalidate_count;
237
238 #ifdef _WIN32
239 static void map_exec(void *addr, long size)
240 {
241 DWORD old_protect;
242 VirtualProtect(addr, size,
243 PAGE_EXECUTE_READWRITE, &old_protect);
244
245 }
246 #else
247 static void map_exec(void *addr, long size)
248 {
249 unsigned long start, end, page_size;
250
251 page_size = getpagesize();
252 start = (unsigned long)addr;
253 start &= ~(page_size - 1);
254
255 end = (unsigned long)addr + size;
256 end += page_size - 1;
257 end &= ~(page_size - 1);
258
259 mprotect((void *)start, end - start,
260 PROT_READ | PROT_WRITE | PROT_EXEC);
261 }
262 #endif
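
/* Usage note (illustrative): mprotect()/VirtualProtect() act on whole host
   pages, which is why the start/end rounding above is needed.  With 4 KiB
   host pages, a call such as

       map_exec(buffer + 100, 1);

   makes the entire page containing buffer[100] readable, writable and
   executable, not just that single byte. */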
263
264 static void page_init(void)
265 {
266 /* NOTE: we can always suppose that qemu_host_page_size >=
267 TARGET_PAGE_SIZE */
268 #ifdef _WIN32
269 {
270 SYSTEM_INFO system_info;
271
272 GetSystemInfo(&system_info);
273 qemu_real_host_page_size = system_info.dwPageSize;
274 }
275 #else
276 qemu_real_host_page_size = getpagesize();
277 #endif
278 if (qemu_host_page_size == 0)
279 qemu_host_page_size = qemu_real_host_page_size;
280 if (qemu_host_page_size < TARGET_PAGE_SIZE)
281 qemu_host_page_size = TARGET_PAGE_SIZE;
282 qemu_host_page_mask = ~(qemu_host_page_size - 1);
283
284 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
285 {
286 #ifdef HAVE_KINFO_GETVMMAP
287 struct kinfo_vmentry *freep;
288 int i, cnt;
289
290 freep = kinfo_getvmmap(getpid(), &cnt);
291 if (freep) {
292 mmap_lock();
293 for (i = 0; i < cnt; i++) {
294 unsigned long startaddr, endaddr;
295
296 startaddr = freep[i].kve_start;
297 endaddr = freep[i].kve_end;
298 if (h2g_valid(startaddr)) {
299 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
300
301 if (h2g_valid(endaddr)) {
302 endaddr = h2g(endaddr);
303 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
304 } else {
305 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
306 endaddr = ~0ul;
307 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
308 #endif
309 }
310 }
311 }
312 free(freep);
313 mmap_unlock();
314 }
315 #else
316 FILE *f;
317
318 last_brk = (unsigned long)sbrk(0);
319
320 f = fopen("/compat/linux/proc/self/maps", "r");
321 if (f) {
322 mmap_lock();
323
324 do {
325 unsigned long startaddr, endaddr;
326 int n;
327
328 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
329
330 if (n == 2 && h2g_valid(startaddr)) {
331 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
332
333 if (h2g_valid(endaddr)) {
334 endaddr = h2g(endaddr);
335 } else {
336 endaddr = ~0ul;
337 }
338 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
339 }
340 } while (!feof(f));
341
342 fclose(f);
343 mmap_unlock();
344 }
345 #endif
346 }
347 #endif
348 }
349
350 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
351 {
352 PageDesc *pd;
353 void **lp;
354 int i;
355
356 #if defined(CONFIG_USER_ONLY)
357 /* We can't use g_malloc because it may recurse into a locked mutex. */
358 # define ALLOC(P, SIZE) \
359 do { \
360 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
361 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
362 } while (0)
363 #else
364 # define ALLOC(P, SIZE) \
365 do { P = g_malloc0(SIZE); } while (0)
366 #endif
367
368 /* Level 1. Always allocated. */
369 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
370
371 /* Level 2..N-1. */
372 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
373 void **p = *lp;
374
375 if (p == NULL) {
376 if (!alloc) {
377 return NULL;
378 }
379 ALLOC(p, sizeof(void *) * L2_SIZE);
380 *lp = p;
381 }
382
383 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
384 }
385
386 pd = *lp;
387 if (pd == NULL) {
388 if (!alloc) {
389 return NULL;
390 }
391 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
392 *lp = pd;
393 }
394
395 #undef ALLOC
396
397 return pd + (index & (L2_SIZE - 1));
398 }
399
400 static inline PageDesc *page_find(tb_page_addr_t index)
401 {
402 return page_find_alloc(index, 0);
403 }
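
/* Usage sketch (illustrative, hypothetical caller): to inspect the TBs
   attached to the page containing a guest code address 'addr':

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (p && p->first_tb) {
           ... at least one TB intersects this page ...
       }

   page_find() never allocates; callers that must create the entry use
   page_find_alloc(index, 1) instead. */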
404
405 #if !defined(CONFIG_USER_ONLY)
406
407 static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
408 {
409 unsigned i;
410 uint16_t ret;
411
412 /* Assign early to avoid the pointer being invalidated by g_renew() */
413 *ptr = ret = phys_map_nodes_nb++;
414 assert(ret != PHYS_MAP_NODE_NIL);
415 if (ret == phys_map_nodes_nb_alloc) {
416 typedef PhysPageEntry Node[L2_SIZE];
417 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
418 phys_map_nodes = g_renew(Node, phys_map_nodes,
419 phys_map_nodes_nb_alloc);
420 }
421 for (i = 0; i < L2_SIZE; ++i) {
422 phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
423 }
424 return phys_map_nodes[ret];
425 }
426
427 static void phys_map_nodes_reset(void)
428 {
429 phys_map_nodes_nb = 0;
430 }
431
432 static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
433 {
434 PhysPageEntry *lp, *p;
435 int i, j;
436
437 lp = &phys_map;
438
439 /* Level 1..N. */
440 for (i = P_L2_LEVELS - 1; i >= 0; i--) {
441 if (lp->u.node == PHYS_MAP_NODE_NIL) {
442 if (!alloc) {
443 return NULL;
444 }
445 p = phys_map_node_alloc(&lp->u.node);
446 if (i == 0) {
447 for (j = 0; j < L2_SIZE; j++) {
448 p[j].u.leaf = phys_section_unassigned;
449 }
450 }
451 } else {
452 p = phys_map_nodes[lp->u.node];
453 }
454 lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
455 }
456
457 return &lp->u.leaf;
458 }
459
460 static MemoryRegionSection phys_page_find(target_phys_addr_t index)
461 {
462 uint16_t *p = phys_page_find_alloc(index, 0);
463 uint16_t s_index = phys_section_unassigned;
464 MemoryRegionSection section;
465 target_phys_addr_t delta;
466
467 if (p) {
468 s_index = *p;
469 }
470 section = phys_sections[s_index];
471 index <<= TARGET_PAGE_BITS;
472 assert(section.offset_within_address_space <= index
473 && index <= section.offset_within_address_space + section.size-1);
474 delta = index - section.offset_within_address_space;
475 section.offset_within_address_space += delta;
476 section.offset_within_region += delta;
477 section.size -= delta;
478 return section;
479 }
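
/* Worked example (illustrative numbers): suppose phys_sections[] holds a
   section for a 2 MB RAM region mapped at guest physical address 0x100000,
   i.e. offset_within_address_space == 0x100000, offset_within_region == 0
   and size == 0x200000.  Then

       MemoryRegionSection s = phys_page_find(0x180000 >> TARGET_PAGE_BITS);

   returns a copy clipped to start at the requested page:
   s.offset_within_address_space == 0x180000, s.offset_within_region ==
   0x80000 and s.size == 0x180000.  Pages that were never registered
   resolve to the phys_section_unassigned section. */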
480
481 static void tlb_protect_code(ram_addr_t ram_addr);
482 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
483 target_ulong vaddr);
484 #define mmap_lock() do { } while(0)
485 #define mmap_unlock() do { } while(0)
486 #endif
487
488 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
489
490 #if defined(CONFIG_USER_ONLY)
491 /* Currently it is not recommended to allocate big chunks of data in
492    user mode. This will change when a dedicated libc is used. */
493 #define USE_STATIC_CODE_GEN_BUFFER
494 #endif
495
496 #ifdef USE_STATIC_CODE_GEN_BUFFER
497 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
498 __attribute__((aligned (CODE_GEN_ALIGN)));
499 #endif
500
501 static void code_gen_alloc(unsigned long tb_size)
502 {
503 #ifdef USE_STATIC_CODE_GEN_BUFFER
504 code_gen_buffer = static_code_gen_buffer;
505 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
506 map_exec(code_gen_buffer, code_gen_buffer_size);
507 #else
508 code_gen_buffer_size = tb_size;
509 if (code_gen_buffer_size == 0) {
510 #if defined(CONFIG_USER_ONLY)
511 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
512 #else
513 /* XXX: needs adjustments */
514 code_gen_buffer_size = (unsigned long)(ram_size / 4);
515 #endif
516 }
517 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
518 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
519 /* The code gen buffer location may have constraints depending on
520 the host cpu and OS */
521 #if defined(__linux__)
522 {
523 int flags;
524 void *start = NULL;
525
526 flags = MAP_PRIVATE | MAP_ANONYMOUS;
527 #if defined(__x86_64__)
528 flags |= MAP_32BIT;
529 /* Cannot map more than that */
530 if (code_gen_buffer_size > (800 * 1024 * 1024))
531 code_gen_buffer_size = (800 * 1024 * 1024);
532 #elif defined(__sparc_v9__)
533 // Map the buffer below 2G, so we can use direct calls and branches
534 flags |= MAP_FIXED;
535 start = (void *) 0x60000000UL;
536 if (code_gen_buffer_size > (512 * 1024 * 1024))
537 code_gen_buffer_size = (512 * 1024 * 1024);
538 #elif defined(__arm__)
539 /* Keep the buffer no bigger than 16MB to branch between blocks */
540 if (code_gen_buffer_size > 16 * 1024 * 1024)
541 code_gen_buffer_size = 16 * 1024 * 1024;
542 #elif defined(__s390x__)
543 /* Map the buffer so that we can use direct calls and branches. */
544 /* We have a +- 4GB range on the branches; leave some slop. */
545 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
546 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
547 }
548 start = (void *)0x90000000UL;
549 #endif
550 code_gen_buffer = mmap(start, code_gen_buffer_size,
551 PROT_WRITE | PROT_READ | PROT_EXEC,
552 flags, -1, 0);
553 if (code_gen_buffer == MAP_FAILED) {
554 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
555 exit(1);
556 }
557 }
558 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
559 || defined(__DragonFly__) || defined(__OpenBSD__) \
560 || defined(__NetBSD__)
561 {
562 int flags;
563 void *addr = NULL;
564 flags = MAP_PRIVATE | MAP_ANONYMOUS;
565 #if defined(__x86_64__)
566 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
567 * 0x40000000 is free */
568 flags |= MAP_FIXED;
569 addr = (void *)0x40000000;
570 /* Cannot map more than that */
571 if (code_gen_buffer_size > (800 * 1024 * 1024))
572 code_gen_buffer_size = (800 * 1024 * 1024);
573 #elif defined(__sparc_v9__)
574 // Map the buffer below 2G, so we can use direct calls and branches
575 flags |= MAP_FIXED;
576 addr = (void *) 0x60000000UL;
577 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
578 code_gen_buffer_size = (512 * 1024 * 1024);
579 }
580 #endif
581 code_gen_buffer = mmap(addr, code_gen_buffer_size,
582 PROT_WRITE | PROT_READ | PROT_EXEC,
583 flags, -1, 0);
584 if (code_gen_buffer == MAP_FAILED) {
585 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
586 exit(1);
587 }
588 }
589 #else
590 code_gen_buffer = g_malloc(code_gen_buffer_size);
591 map_exec(code_gen_buffer, code_gen_buffer_size);
592 #endif
593 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
594 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
595 code_gen_buffer_max_size = code_gen_buffer_size -
596 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
597 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
598 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
599 }
600
601 /* Must be called before using the QEMU cpus. 'tb_size' is the size
602 (in bytes) allocated to the translation buffer. Zero means default
603 size. */
604 void tcg_exec_init(unsigned long tb_size)
605 {
606 cpu_gen_init();
607 code_gen_alloc(tb_size);
608 code_gen_ptr = code_gen_buffer;
609 page_init();
610 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
611 /* There's no guest base to take into account, so go ahead and
612 initialize the prologue now. */
613 tcg_prologue_init(&tcg_ctx);
614 #endif
615 }
616
617 bool tcg_enabled(void)
618 {
619 return code_gen_buffer != NULL;
620 }
621
622 void cpu_exec_init_all(void)
623 {
624 #if !defined(CONFIG_USER_ONLY)
625 memory_map_init();
626 io_mem_init();
627 #endif
628 }
629
630 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
631
632 static int cpu_common_post_load(void *opaque, int version_id)
633 {
634 CPUState *env = opaque;
635
636 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
637 version_id is increased. */
638 env->interrupt_request &= ~0x01;
639 tlb_flush(env, 1);
640
641 return 0;
642 }
643
644 static const VMStateDescription vmstate_cpu_common = {
645 .name = "cpu_common",
646 .version_id = 1,
647 .minimum_version_id = 1,
648 .minimum_version_id_old = 1,
649 .post_load = cpu_common_post_load,
650 .fields = (VMStateField []) {
651 VMSTATE_UINT32(halted, CPUState),
652 VMSTATE_UINT32(interrupt_request, CPUState),
653 VMSTATE_END_OF_LIST()
654 }
655 };
656 #endif
657
658 CPUState *qemu_get_cpu(int cpu)
659 {
660 CPUState *env = first_cpu;
661
662 while (env) {
663 if (env->cpu_index == cpu)
664 break;
665 env = env->next_cpu;
666 }
667
668 return env;
669 }
670
671 void cpu_exec_init(CPUState *env)
672 {
673 CPUState **penv;
674 int cpu_index;
675
676 #if defined(CONFIG_USER_ONLY)
677 cpu_list_lock();
678 #endif
679 env->next_cpu = NULL;
680 penv = &first_cpu;
681 cpu_index = 0;
682 while (*penv != NULL) {
683 penv = &(*penv)->next_cpu;
684 cpu_index++;
685 }
686 env->cpu_index = cpu_index;
687 env->numa_node = 0;
688 QTAILQ_INIT(&env->breakpoints);
689 QTAILQ_INIT(&env->watchpoints);
690 #ifndef CONFIG_USER_ONLY
691 env->thread_id = qemu_get_thread_id();
692 #endif
693 *penv = env;
694 #if defined(CONFIG_USER_ONLY)
695 cpu_list_unlock();
696 #endif
697 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
698 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
699 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
700 cpu_save, cpu_load, env);
701 #endif
702 }
703
704 /* Allocate a new translation block. Flush the translation buffer if
705 too many translation blocks or too much generated code. */
706 static TranslationBlock *tb_alloc(target_ulong pc)
707 {
708 TranslationBlock *tb;
709
710 if (nb_tbs >= code_gen_max_blocks ||
711 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
712 return NULL;
713 tb = &tbs[nb_tbs++];
714 tb->pc = pc;
715 tb->cflags = 0;
716 return tb;
717 }
718
719 void tb_free(TranslationBlock *tb)
720 {
721 /* In practice this is mostly used for single-use temporary TBs.
722 Ignore the hard cases and just back up if this TB happens to
723 be the last one generated. */
724 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
725 code_gen_ptr = tb->tc_ptr;
726 nb_tbs--;
727 }
728 }
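
/* Note (illustrative): tb_alloc()/tb_free() behave like a bump allocator
   over the preallocated tbs[] array and the code generation buffer.  The
   typical caller pattern (cf. tb_gen_code() below) is

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);        (reclaims everything)
           tb = tb_alloc(pc);    (cannot fail right after a flush)
       }

   and only the most recently allocated TB can be undone with tb_free(). */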
729
730 static inline void invalidate_page_bitmap(PageDesc *p)
731 {
732 if (p->code_bitmap) {
733 g_free(p->code_bitmap);
734 p->code_bitmap = NULL;
735 }
736 p->code_write_count = 0;
737 }
738
739 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
740
741 static void page_flush_tb_1 (int level, void **lp)
742 {
743 int i;
744
745 if (*lp == NULL) {
746 return;
747 }
748 if (level == 0) {
749 PageDesc *pd = *lp;
750 for (i = 0; i < L2_SIZE; ++i) {
751 pd[i].first_tb = NULL;
752 invalidate_page_bitmap(pd + i);
753 }
754 } else {
755 void **pp = *lp;
756 for (i = 0; i < L2_SIZE; ++i) {
757 page_flush_tb_1 (level - 1, pp + i);
758 }
759 }
760 }
761
762 static void page_flush_tb(void)
763 {
764 int i;
765 for (i = 0; i < V_L1_SIZE; i++) {
766 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
767 }
768 }
769
770 /* flush all the translation blocks */
771 /* XXX: tb_flush is currently not thread safe */
772 void tb_flush(CPUState *env1)
773 {
774 CPUState *env;
775 #if defined(DEBUG_FLUSH)
776 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
777 (unsigned long)(code_gen_ptr - code_gen_buffer),
778 nb_tbs, nb_tbs > 0 ?
779 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
780 #endif
781 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
782 cpu_abort(env1, "Internal error: code buffer overflow\n");
783
784 nb_tbs = 0;
785
786 for(env = first_cpu; env != NULL; env = env->next_cpu) {
787 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
788 }
789
790 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
791 page_flush_tb();
792
793 code_gen_ptr = code_gen_buffer;
794 /* XXX: flush processor icache at this point if cache flush is
795 expensive */
796 tb_flush_count++;
797 }
798
799 #ifdef DEBUG_TB_CHECK
800
801 static void tb_invalidate_check(target_ulong address)
802 {
803 TranslationBlock *tb;
804 int i;
805 address &= TARGET_PAGE_MASK;
806 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
807 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
808 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
809 address >= tb->pc + tb->size)) {
810 printf("ERROR invalidate: address=" TARGET_FMT_lx
811 " PC=%08lx size=%04x\n",
812 address, (long)tb->pc, tb->size);
813 }
814 }
815 }
816 }
817
818 /* verify that all the pages have correct rights for code */
819 static void tb_page_check(void)
820 {
821 TranslationBlock *tb;
822 int i, flags1, flags2;
823
824 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
825 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
826 flags1 = page_get_flags(tb->pc);
827 flags2 = page_get_flags(tb->pc + tb->size - 1);
828 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
829 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
830 (long)tb->pc, tb->size, flags1, flags2);
831 }
832 }
833 }
834 }
835
836 #endif
837
838 /* invalidate one TB */
839 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
840 int next_offset)
841 {
842 TranslationBlock *tb1;
843 for(;;) {
844 tb1 = *ptb;
845 if (tb1 == tb) {
846 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
847 break;
848 }
849 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
850 }
851 }
852
853 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
854 {
855 TranslationBlock *tb1;
856 unsigned int n1;
857
858 for(;;) {
859 tb1 = *ptb;
860 n1 = (long)tb1 & 3;
861 tb1 = (TranslationBlock *)((long)tb1 & ~3);
862 if (tb1 == tb) {
863 *ptb = tb1->page_next[n1];
864 break;
865 }
866 ptb = &tb1->page_next[n1];
867 }
868 }
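
/* Note on the pointer encoding decoded above (and in tb_jmp_remove()
   below): the per-page TB lists store each link with its low two bits used
   as a tag, so that one TranslationBlock can sit on the lists of both
   pages it spans.  The decode is always

       n  = (long)tb & 3;                        (0 or 1: which page slot;
                                                  2: end-of-list marker)
       tb = (TranslationBlock *)((long)tb & ~3); (the real pointer)

   which relies on TranslationBlock pointers being at least 4-byte
   aligned. */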
869
870 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
871 {
872 TranslationBlock *tb1, **ptb;
873 unsigned int n1;
874
875 ptb = &tb->jmp_next[n];
876 tb1 = *ptb;
877 if (tb1) {
878 /* find tb(n) in circular list */
879 for(;;) {
880 tb1 = *ptb;
881 n1 = (long)tb1 & 3;
882 tb1 = (TranslationBlock *)((long)tb1 & ~3);
883 if (n1 == n && tb1 == tb)
884 break;
885 if (n1 == 2) {
886 ptb = &tb1->jmp_first;
887 } else {
888 ptb = &tb1->jmp_next[n1];
889 }
890 }
891 /* now we can suppress tb(n) from the list */
892 *ptb = tb->jmp_next[n];
893
894 tb->jmp_next[n] = NULL;
895 }
896 }
897
898 /* reset the jump entry 'n' of a TB so that it is not chained to
899 another TB */
900 static inline void tb_reset_jump(TranslationBlock *tb, int n)
901 {
902 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
903 }
904
905 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
906 {
907 CPUState *env;
908 PageDesc *p;
909 unsigned int h, n1;
910 tb_page_addr_t phys_pc;
911 TranslationBlock *tb1, *tb2;
912
913 /* remove the TB from the hash list */
914 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
915 h = tb_phys_hash_func(phys_pc);
916 tb_remove(&tb_phys_hash[h], tb,
917 offsetof(TranslationBlock, phys_hash_next));
918
919 /* remove the TB from the page list */
920 if (tb->page_addr[0] != page_addr) {
921 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
922 tb_page_remove(&p->first_tb, tb);
923 invalidate_page_bitmap(p);
924 }
925 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
926 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
927 tb_page_remove(&p->first_tb, tb);
928 invalidate_page_bitmap(p);
929 }
930
931 tb_invalidated_flag = 1;
932
933 /* remove the TB from the hash list */
934 h = tb_jmp_cache_hash_func(tb->pc);
935 for(env = first_cpu; env != NULL; env = env->next_cpu) {
936 if (env->tb_jmp_cache[h] == tb)
937 env->tb_jmp_cache[h] = NULL;
938 }
939
940 /* suppress this TB from the two jump lists */
941 tb_jmp_remove(tb, 0);
942 tb_jmp_remove(tb, 1);
943
944 /* suppress any remaining jumps to this TB */
945 tb1 = tb->jmp_first;
946 for(;;) {
947 n1 = (long)tb1 & 3;
948 if (n1 == 2)
949 break;
950 tb1 = (TranslationBlock *)((long)tb1 & ~3);
951 tb2 = tb1->jmp_next[n1];
952 tb_reset_jump(tb1, n1);
953 tb1->jmp_next[n1] = NULL;
954 tb1 = tb2;
955 }
956 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
957
958 tb_phys_invalidate_count++;
959 }
960
961 static inline void set_bits(uint8_t *tab, int start, int len)
962 {
963 int end, mask, end1;
964
965 end = start + len;
966 tab += start >> 3;
967 mask = 0xff << (start & 7);
968 if ((start & ~7) == (end & ~7)) {
969 if (start < end) {
970 mask &= ~(0xff << (end & 7));
971 *tab |= mask;
972 }
973 } else {
974 *tab++ |= mask;
975 start = (start + 8) & ~7;
976 end1 = end & ~7;
977 while (start < end1) {
978 *tab++ = 0xff;
979 start += 8;
980 }
981 if (start < end) {
982 mask = ~(0xff << (end & 7));
983 *tab |= mask;
984 }
985 }
986 }
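
/* Worked example (illustrative): set_bits(tab, 5, 10) marks bits 5..14 of
   a bitmap stored least-significant-bit first within each byte, i.e.
   tab[0] |= 0xe0 (bits 5-7) and tab[1] |= 0x7f (bits 8-14).
   build_page_bitmap() below uses this to record which bytes of a page are
   covered by translated code. */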
987
988 static void build_page_bitmap(PageDesc *p)
989 {
990 int n, tb_start, tb_end;
991 TranslationBlock *tb;
992
993 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
994
995 tb = p->first_tb;
996 while (tb != NULL) {
997 n = (long)tb & 3;
998 tb = (TranslationBlock *)((long)tb & ~3);
999 /* NOTE: this is subtle as a TB may span two physical pages */
1000 if (n == 0) {
1001 /* NOTE: tb_end may be after the end of the page, but
1002 it is not a problem */
1003 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1004 tb_end = tb_start + tb->size;
1005 if (tb_end > TARGET_PAGE_SIZE)
1006 tb_end = TARGET_PAGE_SIZE;
1007 } else {
1008 tb_start = 0;
1009 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1010 }
1011 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1012 tb = tb->page_next[n];
1013 }
1014 }
1015
1016 TranslationBlock *tb_gen_code(CPUState *env,
1017 target_ulong pc, target_ulong cs_base,
1018 int flags, int cflags)
1019 {
1020 TranslationBlock *tb;
1021 uint8_t *tc_ptr;
1022 tb_page_addr_t phys_pc, phys_page2;
1023 target_ulong virt_page2;
1024 int code_gen_size;
1025
1026 phys_pc = get_page_addr_code(env, pc);
1027 tb = tb_alloc(pc);
1028 if (!tb) {
1029 /* flush must be done */
1030 tb_flush(env);
1031 /* cannot fail at this point */
1032 tb = tb_alloc(pc);
1033 /* Don't forget to invalidate previous TB info. */
1034 tb_invalidated_flag = 1;
1035 }
1036 tc_ptr = code_gen_ptr;
1037 tb->tc_ptr = tc_ptr;
1038 tb->cs_base = cs_base;
1039 tb->flags = flags;
1040 tb->cflags = cflags;
1041 cpu_gen_code(env, tb, &code_gen_size);
1042 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1043
1044 /* check next page if needed */
1045 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1046 phys_page2 = -1;
1047 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1048 phys_page2 = get_page_addr_code(env, virt_page2);
1049 }
1050 tb_link_page(tb, phys_pc, phys_page2);
1051 return tb;
1052 }
1053
1054 /* invalidate all TBs which intersect with the target physical page
1055 starting in range [start;end[. NOTE: start and end must refer to
1056 the same physical page. 'is_cpu_write_access' should be true if called
1057 from a real cpu write access: the virtual CPU will exit the current
1058 TB if code is modified inside this TB. */
1059 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1060 int is_cpu_write_access)
1061 {
1062 TranslationBlock *tb, *tb_next, *saved_tb;
1063 CPUState *env = cpu_single_env;
1064 tb_page_addr_t tb_start, tb_end;
1065 PageDesc *p;
1066 int n;
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068 int current_tb_not_found = is_cpu_write_access;
1069 TranslationBlock *current_tb = NULL;
1070 int current_tb_modified = 0;
1071 target_ulong current_pc = 0;
1072 target_ulong current_cs_base = 0;
1073 int current_flags = 0;
1074 #endif /* TARGET_HAS_PRECISE_SMC */
1075
1076 p = page_find(start >> TARGET_PAGE_BITS);
1077 if (!p)
1078 return;
1079 if (!p->code_bitmap &&
1080 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1081 is_cpu_write_access) {
1082 /* build code bitmap */
1083 build_page_bitmap(p);
1084 }
1085
1086 /* we remove all the TBs in the range [start, end[ */
1087 /* XXX: see if in some cases it could be faster to invalidate all the code */
1088 tb = p->first_tb;
1089 while (tb != NULL) {
1090 n = (long)tb & 3;
1091 tb = (TranslationBlock *)((long)tb & ~3);
1092 tb_next = tb->page_next[n];
1093 /* NOTE: this is subtle as a TB may span two physical pages */
1094 if (n == 0) {
1095 /* NOTE: tb_end may be after the end of the page, but
1096 it is not a problem */
1097 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1098 tb_end = tb_start + tb->size;
1099 } else {
1100 tb_start = tb->page_addr[1];
1101 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1102 }
1103 if (!(tb_end <= start || tb_start >= end)) {
1104 #ifdef TARGET_HAS_PRECISE_SMC
1105 if (current_tb_not_found) {
1106 current_tb_not_found = 0;
1107 current_tb = NULL;
1108 if (env->mem_io_pc) {
1109 /* now we have a real cpu fault */
1110 current_tb = tb_find_pc(env->mem_io_pc);
1111 }
1112 }
1113 if (current_tb == tb &&
1114 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1115 /* If we are modifying the current TB, we must stop
1116 its execution. We could be more precise by checking
1117 that the modification is after the current PC, but it
1118 would require a specialized function to partially
1119 restore the CPU state */
1120
1121 current_tb_modified = 1;
1122 cpu_restore_state(current_tb, env, env->mem_io_pc);
1123 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1124 &current_flags);
1125 }
1126 #endif /* TARGET_HAS_PRECISE_SMC */
1127 /* we need to do that to handle the case where a signal
1128 occurs while doing tb_phys_invalidate() */
1129 saved_tb = NULL;
1130 if (env) {
1131 saved_tb = env->current_tb;
1132 env->current_tb = NULL;
1133 }
1134 tb_phys_invalidate(tb, -1);
1135 if (env) {
1136 env->current_tb = saved_tb;
1137 if (env->interrupt_request && env->current_tb)
1138 cpu_interrupt(env, env->interrupt_request);
1139 }
1140 }
1141 tb = tb_next;
1142 }
1143 #if !defined(CONFIG_USER_ONLY)
1144     /* if no code remains, there is no need to keep using slow writes */
1145 if (!p->first_tb) {
1146 invalidate_page_bitmap(p);
1147 if (is_cpu_write_access) {
1148 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1149 }
1150 }
1151 #endif
1152 #ifdef TARGET_HAS_PRECISE_SMC
1153 if (current_tb_modified) {
1154 /* we generate a block containing just the instruction
1155 modifying the memory. It will ensure that it cannot modify
1156 itself */
1157 env->current_tb = NULL;
1158 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1159 cpu_resume_from_signal(env, NULL);
1160 }
1161 #endif
1162 }
1163
1164 /* len must be <= 8 and start must be a multiple of len */
1165 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1166 {
1167 PageDesc *p;
1168 int offset, b;
1169 #if 0
1170 if (1) {
1171 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1172 cpu_single_env->mem_io_vaddr, len,
1173 cpu_single_env->eip,
1174 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1175 }
1176 #endif
1177 p = page_find(start >> TARGET_PAGE_BITS);
1178 if (!p)
1179 return;
1180 if (p->code_bitmap) {
1181 offset = start & ~TARGET_PAGE_MASK;
1182 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1183 if (b & ((1 << len) - 1))
1184 goto do_invalidate;
1185 } else {
1186 do_invalidate:
1187 tb_invalidate_phys_page_range(start, start + len, 1);
1188 }
1189 }
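
/* Example (illustrative): for a 2-byte guest store at 'start', len == 2
   and the bitmap test above extracts the two bits covering those bytes:

       b = p->code_bitmap[offset >> 3] >> (offset & 7);
       if (b & 0x3) { ... the store overlaps translated code ... }

   so the slow tb_invalidate_phys_page_range() path is taken only when the
   write really touches translated code (or when no bitmap has been built
   yet). */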
1190
1191 #if !defined(CONFIG_SOFTMMU)
1192 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1193 unsigned long pc, void *puc)
1194 {
1195 TranslationBlock *tb;
1196 PageDesc *p;
1197 int n;
1198 #ifdef TARGET_HAS_PRECISE_SMC
1199 TranslationBlock *current_tb = NULL;
1200 CPUState *env = cpu_single_env;
1201 int current_tb_modified = 0;
1202 target_ulong current_pc = 0;
1203 target_ulong current_cs_base = 0;
1204 int current_flags = 0;
1205 #endif
1206
1207 addr &= TARGET_PAGE_MASK;
1208 p = page_find(addr >> TARGET_PAGE_BITS);
1209 if (!p)
1210 return;
1211 tb = p->first_tb;
1212 #ifdef TARGET_HAS_PRECISE_SMC
1213 if (tb && pc != 0) {
1214 current_tb = tb_find_pc(pc);
1215 }
1216 #endif
1217 while (tb != NULL) {
1218 n = (long)tb & 3;
1219 tb = (TranslationBlock *)((long)tb & ~3);
1220 #ifdef TARGET_HAS_PRECISE_SMC
1221 if (current_tb == tb &&
1222 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1223 /* If we are modifying the current TB, we must stop
1224 its execution. We could be more precise by checking
1225 that the modification is after the current PC, but it
1226 would require a specialized function to partially
1227 restore the CPU state */
1228
1229 current_tb_modified = 1;
1230 cpu_restore_state(current_tb, env, pc);
1231 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1232 &current_flags);
1233 }
1234 #endif /* TARGET_HAS_PRECISE_SMC */
1235 tb_phys_invalidate(tb, addr);
1236 tb = tb->page_next[n];
1237 }
1238 p->first_tb = NULL;
1239 #ifdef TARGET_HAS_PRECISE_SMC
1240 if (current_tb_modified) {
1241 /* we generate a block containing just the instruction
1242 modifying the memory. It will ensure that it cannot modify
1243 itself */
1244 env->current_tb = NULL;
1245 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1246 cpu_resume_from_signal(env, puc);
1247 }
1248 #endif
1249 }
1250 #endif
1251
1252 /* add the tb in the target page and protect it if necessary */
1253 static inline void tb_alloc_page(TranslationBlock *tb,
1254 unsigned int n, tb_page_addr_t page_addr)
1255 {
1256 PageDesc *p;
1257 #ifndef CONFIG_USER_ONLY
1258 bool page_already_protected;
1259 #endif
1260
1261 tb->page_addr[n] = page_addr;
1262 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1263 tb->page_next[n] = p->first_tb;
1264 #ifndef CONFIG_USER_ONLY
1265 page_already_protected = p->first_tb != NULL;
1266 #endif
1267 p->first_tb = (TranslationBlock *)((long)tb | n);
1268 invalidate_page_bitmap(p);
1269
1270 #if defined(TARGET_HAS_SMC) || 1
1271
1272 #if defined(CONFIG_USER_ONLY)
1273 if (p->flags & PAGE_WRITE) {
1274 target_ulong addr;
1275 PageDesc *p2;
1276 int prot;
1277
1278 /* force the host page as non writable (writes will have a
1279 page fault + mprotect overhead) */
1280 page_addr &= qemu_host_page_mask;
1281 prot = 0;
1282 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1283 addr += TARGET_PAGE_SIZE) {
1284
1285 p2 = page_find (addr >> TARGET_PAGE_BITS);
1286 if (!p2)
1287 continue;
1288 prot |= p2->flags;
1289 p2->flags &= ~PAGE_WRITE;
1290 }
1291 mprotect(g2h(page_addr), qemu_host_page_size,
1292 (prot & PAGE_BITS) & ~PAGE_WRITE);
1293 #ifdef DEBUG_TB_INVALIDATE
1294 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1295 page_addr);
1296 #endif
1297 }
1298 #else
1299 /* if some code is already present, then the pages are already
1300 protected. So we handle the case where only the first TB is
1301 allocated in a physical page */
1302 if (!page_already_protected) {
1303 tlb_protect_code(page_addr);
1304 }
1305 #endif
1306
1307 #endif /* TARGET_HAS_SMC */
1308 }
1309
1310 /* add a new TB and link it to the physical page tables. phys_page2 is
1311 (-1) to indicate that only one page contains the TB. */
1312 void tb_link_page(TranslationBlock *tb,
1313 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1314 {
1315 unsigned int h;
1316 TranslationBlock **ptb;
1317
1318 /* Grab the mmap lock to stop another thread invalidating this TB
1319 before we are done. */
1320 mmap_lock();
1321 /* add in the physical hash table */
1322 h = tb_phys_hash_func(phys_pc);
1323 ptb = &tb_phys_hash[h];
1324 tb->phys_hash_next = *ptb;
1325 *ptb = tb;
1326
1327 /* add in the page list */
1328 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1329 if (phys_page2 != -1)
1330 tb_alloc_page(tb, 1, phys_page2);
1331 else
1332 tb->page_addr[1] = -1;
1333
1334 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1335 tb->jmp_next[0] = NULL;
1336 tb->jmp_next[1] = NULL;
1337
1338 /* init original jump addresses */
1339 if (tb->tb_next_offset[0] != 0xffff)
1340 tb_reset_jump(tb, 0);
1341 if (tb->tb_next_offset[1] != 0xffff)
1342 tb_reset_jump(tb, 1);
1343
1344 #ifdef DEBUG_TB_CHECK
1345 tb_page_check();
1346 #endif
1347 mmap_unlock();
1348 }
1349
1350 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1351 tb[1].tc_ptr. Return NULL if not found */
1352 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1353 {
1354 int m_min, m_max, m;
1355 unsigned long v;
1356 TranslationBlock *tb;
1357
1358 if (nb_tbs <= 0)
1359 return NULL;
1360 if (tc_ptr < (unsigned long)code_gen_buffer ||
1361 tc_ptr >= (unsigned long)code_gen_ptr)
1362 return NULL;
1363 /* binary search (cf Knuth) */
1364 m_min = 0;
1365 m_max = nb_tbs - 1;
1366 while (m_min <= m_max) {
1367 m = (m_min + m_max) >> 1;
1368 tb = &tbs[m];
1369 v = (unsigned long)tb->tc_ptr;
1370 if (v == tc_ptr)
1371 return tb;
1372 else if (tc_ptr < v) {
1373 m_max = m - 1;
1374 } else {
1375 m_min = m + 1;
1376 }
1377 }
1378 return &tbs[m_max];
1379 }
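
/* Note (illustrative): the binary search above is valid because TBs are
   handed out sequentially from tbs[] and their tc_ptr values are carved
   from the code generation buffer in the same order, so tbs[] is
   implicitly sorted by tc_ptr.  Callers typically pass a host PC taken
   from env->mem_io_pc or from a signal frame in order to map generated
   code back to the TB that produced it. */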
1380
1381 static void tb_reset_jump_recursive(TranslationBlock *tb);
1382
1383 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1384 {
1385 TranslationBlock *tb1, *tb_next, **ptb;
1386 unsigned int n1;
1387
1388 tb1 = tb->jmp_next[n];
1389 if (tb1 != NULL) {
1390 /* find head of list */
1391 for(;;) {
1392 n1 = (long)tb1 & 3;
1393 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1394 if (n1 == 2)
1395 break;
1396 tb1 = tb1->jmp_next[n1];
1397 }
1398         /* we are now sure that tb jumps to tb1 */
1399 tb_next = tb1;
1400
1401 /* remove tb from the jmp_first list */
1402 ptb = &tb_next->jmp_first;
1403 for(;;) {
1404 tb1 = *ptb;
1405 n1 = (long)tb1 & 3;
1406 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1407 if (n1 == n && tb1 == tb)
1408 break;
1409 ptb = &tb1->jmp_next[n1];
1410 }
1411 *ptb = tb->jmp_next[n];
1412 tb->jmp_next[n] = NULL;
1413
1414 /* suppress the jump to next tb in generated code */
1415 tb_reset_jump(tb, n);
1416
1417 /* suppress jumps in the tb on which we could have jumped */
1418 tb_reset_jump_recursive(tb_next);
1419 }
1420 }
1421
1422 static void tb_reset_jump_recursive(TranslationBlock *tb)
1423 {
1424 tb_reset_jump_recursive2(tb, 0);
1425 tb_reset_jump_recursive2(tb, 1);
1426 }
1427
1428 #if defined(TARGET_HAS_ICE)
1429 #if defined(CONFIG_USER_ONLY)
1430 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1431 {
1432 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1433 }
1434 #else
1435 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1436 {
1437 target_phys_addr_t addr;
1438 ram_addr_t ram_addr;
1439 MemoryRegionSection section;
1440
1441 addr = cpu_get_phys_page_debug(env, pc);
1442 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1443 if (!(memory_region_is_ram(section.mr)
1444 || (section.mr->rom_device && section.mr->readable))) {
1445 return;
1446 }
1447 ram_addr = (memory_region_get_ram_addr(section.mr)
1448 + section.offset_within_region) & TARGET_PAGE_MASK;
1449 ram_addr |= (pc & ~TARGET_PAGE_MASK);
1450 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1451 }
1452 #endif
1453 #endif /* TARGET_HAS_ICE */
1454
1455 #if defined(CONFIG_USER_ONLY)
1456 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1457
1458 {
1459 }
1460
1461 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1462 int flags, CPUWatchpoint **watchpoint)
1463 {
1464 return -ENOSYS;
1465 }
1466 #else
1467 /* Add a watchpoint. */
1468 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1469 int flags, CPUWatchpoint **watchpoint)
1470 {
1471 target_ulong len_mask = ~(len - 1);
1472 CPUWatchpoint *wp;
1473
1474 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1475 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1476 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1477 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1478 return -EINVAL;
1479 }
1480 wp = g_malloc(sizeof(*wp));
1481
1482 wp->vaddr = addr;
1483 wp->len_mask = len_mask;
1484 wp->flags = flags;
1485
1486 /* keep all GDB-injected watchpoints in front */
1487 if (flags & BP_GDB)
1488 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1489 else
1490 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1491
1492 tlb_flush_page(env, addr);
1493
1494 if (watchpoint)
1495 *watchpoint = wp;
1496 return 0;
1497 }
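
/* Usage sketch (illustrative; BP_MEM_WRITE and BP_GDB are the flag names
   assumed here): watch 4 bytes at 'addr' for guest writes on behalf of a
   gdb client:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
           ... invalid length or alignment ...
       }

   The length must be a power of two (1, 2, 4 or 8) and 'addr' must be
   aligned to it, as checked above. */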
1498
1499 /* Remove a specific watchpoint. */
1500 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1501 int flags)
1502 {
1503 target_ulong len_mask = ~(len - 1);
1504 CPUWatchpoint *wp;
1505
1506 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1507 if (addr == wp->vaddr && len_mask == wp->len_mask
1508 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1509 cpu_watchpoint_remove_by_ref(env, wp);
1510 return 0;
1511 }
1512 }
1513 return -ENOENT;
1514 }
1515
1516 /* Remove a specific watchpoint by reference. */
1517 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1518 {
1519 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1520
1521 tlb_flush_page(env, watchpoint->vaddr);
1522
1523 g_free(watchpoint);
1524 }
1525
1526 /* Remove all matching watchpoints. */
1527 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1528 {
1529 CPUWatchpoint *wp, *next;
1530
1531 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1532 if (wp->flags & mask)
1533 cpu_watchpoint_remove_by_ref(env, wp);
1534 }
1535 }
1536 #endif
1537
1538 /* Add a breakpoint. */
1539 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1540 CPUBreakpoint **breakpoint)
1541 {
1542 #if defined(TARGET_HAS_ICE)
1543 CPUBreakpoint *bp;
1544
1545 bp = g_malloc(sizeof(*bp));
1546
1547 bp->pc = pc;
1548 bp->flags = flags;
1549
1550 /* keep all GDB-injected breakpoints in front */
1551 if (flags & BP_GDB)
1552 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1553 else
1554 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1555
1556 breakpoint_invalidate(env, pc);
1557
1558 if (breakpoint)
1559 *breakpoint = bp;
1560 return 0;
1561 #else
1562 return -ENOSYS;
1563 #endif
1564 }
1565
1566 /* Remove a specific breakpoint. */
1567 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1568 {
1569 #if defined(TARGET_HAS_ICE)
1570 CPUBreakpoint *bp;
1571
1572 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1573 if (bp->pc == pc && bp->flags == flags) {
1574 cpu_breakpoint_remove_by_ref(env, bp);
1575 return 0;
1576 }
1577 }
1578 return -ENOENT;
1579 #else
1580 return -ENOSYS;
1581 #endif
1582 }
1583
1584 /* Remove a specific breakpoint by reference. */
1585 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1586 {
1587 #if defined(TARGET_HAS_ICE)
1588 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1589
1590 breakpoint_invalidate(env, breakpoint->pc);
1591
1592 g_free(breakpoint);
1593 #endif
1594 }
1595
1596 /* Remove all matching breakpoints. */
1597 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1598 {
1599 #if defined(TARGET_HAS_ICE)
1600 CPUBreakpoint *bp, *next;
1601
1602 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1603 if (bp->flags & mask)
1604 cpu_breakpoint_remove_by_ref(env, bp);
1605 }
1606 #endif
1607 }
1608
1609 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1610 CPU loop after each instruction */
1611 void cpu_single_step(CPUState *env, int enabled)
1612 {
1613 #if defined(TARGET_HAS_ICE)
1614 if (env->singlestep_enabled != enabled) {
1615 env->singlestep_enabled = enabled;
1616 if (kvm_enabled())
1617 kvm_update_guest_debug(env, 0);
1618 else {
1619 /* must flush all the translated code to avoid inconsistencies */
1620 /* XXX: only flush what is necessary */
1621 tb_flush(env);
1622 }
1623 }
1624 #endif
1625 }
1626
1627 /* enable or disable low-level logging */
1628 void cpu_set_log(int log_flags)
1629 {
1630 loglevel = log_flags;
1631 if (loglevel && !logfile) {
1632 logfile = fopen(logfilename, log_append ? "a" : "w");
1633 if (!logfile) {
1634 perror(logfilename);
1635 _exit(1);
1636 }
1637 #if !defined(CONFIG_SOFTMMU)
1638 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1639 {
1640 static char logfile_buf[4096];
1641 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1642 }
1643 #elif defined(_WIN32)
1644 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1645 setvbuf(logfile, NULL, _IONBF, 0);
1646 #else
1647 setvbuf(logfile, NULL, _IOLBF, 0);
1648 #endif
1649 log_append = 1;
1650 }
1651 if (!loglevel && logfile) {
1652 fclose(logfile);
1653 logfile = NULL;
1654 }
1655 }
1656
1657 void cpu_set_log_filename(const char *filename)
1658 {
1659 logfilename = strdup(filename);
1660 if (logfile) {
1661 fclose(logfile);
1662 logfile = NULL;
1663 }
1664 cpu_set_log(loglevel);
1665 }
1666
1667 static void cpu_unlink_tb(CPUState *env)
1668 {
1669 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1670 problem and hope the cpu will stop of its own accord. For userspace
1671 emulation this often isn't actually as bad as it sounds. Often
1672 signals are used primarily to interrupt blocking syscalls. */
1673 TranslationBlock *tb;
1674 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1675
1676 spin_lock(&interrupt_lock);
1677 tb = env->current_tb;
1678 /* if the cpu is currently executing code, we must unlink it and
1679 all the potentially executing TB */
1680 if (tb) {
1681 env->current_tb = NULL;
1682 tb_reset_jump_recursive(tb);
1683 }
1684 spin_unlock(&interrupt_lock);
1685 }
1686
1687 #ifndef CONFIG_USER_ONLY
1688 /* mask must never be zero, except for the A20 change call */
1689 static void tcg_handle_interrupt(CPUState *env, int mask)
1690 {
1691 int old_mask;
1692
1693 old_mask = env->interrupt_request;
1694 env->interrupt_request |= mask;
1695
1696 /*
1697 * If called from iothread context, wake the target cpu in
1698  * case it's halted.
1699 */
1700 if (!qemu_cpu_is_self(env)) {
1701 qemu_cpu_kick(env);
1702 return;
1703 }
1704
1705 if (use_icount) {
1706 env->icount_decr.u16.high = 0xffff;
1707 if (!can_do_io(env)
1708 && (mask & ~old_mask) != 0) {
1709 cpu_abort(env, "Raised interrupt while not in I/O function");
1710 }
1711 } else {
1712 cpu_unlink_tb(env);
1713 }
1714 }
1715
1716 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1717
1718 #else /* CONFIG_USER_ONLY */
1719
1720 void cpu_interrupt(CPUState *env, int mask)
1721 {
1722 env->interrupt_request |= mask;
1723 cpu_unlink_tb(env);
1724 }
1725 #endif /* CONFIG_USER_ONLY */
1726
1727 void cpu_reset_interrupt(CPUState *env, int mask)
1728 {
1729 env->interrupt_request &= ~mask;
1730 }
1731
1732 void cpu_exit(CPUState *env)
1733 {
1734 env->exit_request = 1;
1735 cpu_unlink_tb(env);
1736 }
1737
1738 const CPULogItem cpu_log_items[] = {
1739 { CPU_LOG_TB_OUT_ASM, "out_asm",
1740 "show generated host assembly code for each compiled TB" },
1741 { CPU_LOG_TB_IN_ASM, "in_asm",
1742 "show target assembly code for each compiled TB" },
1743 { CPU_LOG_TB_OP, "op",
1744 "show micro ops for each compiled TB" },
1745 { CPU_LOG_TB_OP_OPT, "op_opt",
1746 "show micro ops "
1747 #ifdef TARGET_I386
1748 "before eflags optimization and "
1749 #endif
1750 "after liveness analysis" },
1751 { CPU_LOG_INT, "int",
1752 "show interrupts/exceptions in short format" },
1753 { CPU_LOG_EXEC, "exec",
1754 "show trace before each executed TB (lots of logs)" },
1755 { CPU_LOG_TB_CPU, "cpu",
1756 "show CPU state before block translation" },
1757 #ifdef TARGET_I386
1758 { CPU_LOG_PCALL, "pcall",
1759 "show protected mode far calls/returns/exceptions" },
1760 { CPU_LOG_RESET, "cpu_reset",
1761 "show CPU state before CPU resets" },
1762 #endif
1763 #ifdef DEBUG_IOPORT
1764 { CPU_LOG_IOPORT, "ioport",
1765 "show all i/o ports accesses" },
1766 #endif
1767 { 0, NULL, NULL },
1768 };
1769
1770 static int cmp1(const char *s1, int n, const char *s2)
1771 {
1772 if (strlen(s2) != n)
1773 return 0;
1774 return memcmp(s1, s2, n) == 0;
1775 }
1776
1777 /* takes a comma-separated list of log masks. Returns 0 on error. */
1778 int cpu_str_to_log_mask(const char *str)
1779 {
1780 const CPULogItem *item;
1781 int mask;
1782 const char *p, *p1;
1783
1784 p = str;
1785 mask = 0;
1786 for(;;) {
1787 p1 = strchr(p, ',');
1788 if (!p1)
1789 p1 = p + strlen(p);
1790 if(cmp1(p,p1-p,"all")) {
1791 for(item = cpu_log_items; item->mask != 0; item++) {
1792 mask |= item->mask;
1793 }
1794 } else {
1795 for(item = cpu_log_items; item->mask != 0; item++) {
1796 if (cmp1(p, p1 - p, item->name))
1797 goto found;
1798 }
1799 return 0;
1800 }
1801 found:
1802 mask |= item->mask;
1803 if (*p1 != ',')
1804 break;
1805 p = p1 + 1;
1806 }
1807 return mask;
1808 }
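
/* Example (illustrative): cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, cpu_str_to_log_mask("all") returns the
   union of every mask in cpu_log_items[], and any unknown name makes the
   whole call return 0. */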
1809
1810 void cpu_abort(CPUState *env, const char *fmt, ...)
1811 {
1812 va_list ap;
1813 va_list ap2;
1814
1815 va_start(ap, fmt);
1816 va_copy(ap2, ap);
1817 fprintf(stderr, "qemu: fatal: ");
1818 vfprintf(stderr, fmt, ap);
1819 fprintf(stderr, "\n");
1820 #ifdef TARGET_I386
1821 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1822 #else
1823 cpu_dump_state(env, stderr, fprintf, 0);
1824 #endif
1825 if (qemu_log_enabled()) {
1826 qemu_log("qemu: fatal: ");
1827 qemu_log_vprintf(fmt, ap2);
1828 qemu_log("\n");
1829 #ifdef TARGET_I386
1830 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1831 #else
1832 log_cpu_state(env, 0);
1833 #endif
1834 qemu_log_flush();
1835 qemu_log_close();
1836 }
1837 va_end(ap2);
1838 va_end(ap);
1839 #if defined(CONFIG_USER_ONLY)
1840 {
1841 struct sigaction act;
1842 sigfillset(&act.sa_mask);
1843 act.sa_handler = SIG_DFL;
1844 sigaction(SIGABRT, &act, NULL);
1845 }
1846 #endif
1847 abort();
1848 }
1849
1850 CPUState *cpu_copy(CPUState *env)
1851 {
1852 CPUState *new_env = cpu_init(env->cpu_model_str);
1853 CPUState *next_cpu = new_env->next_cpu;
1854 int cpu_index = new_env->cpu_index;
1855 #if defined(TARGET_HAS_ICE)
1856 CPUBreakpoint *bp;
1857 CPUWatchpoint *wp;
1858 #endif
1859
1860 memcpy(new_env, env, sizeof(CPUState));
1861
1862 /* Preserve chaining and index. */
1863 new_env->next_cpu = next_cpu;
1864 new_env->cpu_index = cpu_index;
1865
1866 /* Clone all break/watchpoints.
1867 Note: Once we support ptrace with hw-debug register access, make sure
1868 BP_CPU break/watchpoints are handled correctly on clone. */
1869 QTAILQ_INIT(&env->breakpoints);
1870 QTAILQ_INIT(&env->watchpoints);
1871 #if defined(TARGET_HAS_ICE)
1872 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1873 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1874 }
1875 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1876 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1877 wp->flags, NULL);
1878 }
1879 #endif
1880
1881 return new_env;
1882 }
1883
1884 #if !defined(CONFIG_USER_ONLY)
1885
1886 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1887 {
1888 unsigned int i;
1889
1890 /* Discard jump cache entries for any tb which might potentially
1891 overlap the flushed page. */
1892 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1893 memset (&env->tb_jmp_cache[i], 0,
1894 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1895
1896 i = tb_jmp_cache_hash_page(addr);
1897 memset (&env->tb_jmp_cache[i], 0,
1898 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1899 }
1900
1901 static CPUTLBEntry s_cputlb_empty_entry = {
1902 .addr_read = -1,
1903 .addr_write = -1,
1904 .addr_code = -1,
1905 .addend = -1,
1906 };
1907
1908 /* NOTE:
1909 * If flush_global is true (the usual case), flush all tlb entries.
1910 * If flush_global is false, flush (at least) all tlb entries not
1911 * marked global.
1912 *
1913 * Since QEMU doesn't currently implement a global/not-global flag
1914 * for tlb entries, at the moment tlb_flush() will also flush all
1915 * tlb entries in the flush_global == false case. This is OK because
1916 * CPU architectures generally permit an implementation to drop
1917 * entries from the TLB at any time, so flushing more entries than
1918 * required is only an efficiency issue, not a correctness issue.
1919 */
1920 void tlb_flush(CPUState *env, int flush_global)
1921 {
1922 int i;
1923
1924 #if defined(DEBUG_TLB)
1925 printf("tlb_flush:\n");
1926 #endif
1927 /* must reset current TB so that interrupts cannot modify the
1928 links while we are modifying them */
1929 env->current_tb = NULL;
1930
1931 for(i = 0; i < CPU_TLB_SIZE; i++) {
1932 int mmu_idx;
1933 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1934 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1935 }
1936 }
1937
1938 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1939
1940 env->tlb_flush_addr = -1;
1941 env->tlb_flush_mask = 0;
1942 tlb_flush_count++;
1943 }
1944
1945 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1946 {
1947 if (addr == (tlb_entry->addr_read &
1948 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1949 addr == (tlb_entry->addr_write &
1950 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1951 addr == (tlb_entry->addr_code &
1952 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1953 *tlb_entry = s_cputlb_empty_entry;
1954 }
1955 }
1956
1957 void tlb_flush_page(CPUState *env, target_ulong addr)
1958 {
1959 int i;
1960 int mmu_idx;
1961
1962 #if defined(DEBUG_TLB)
1963 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1964 #endif
1965 /* Check if we need to flush due to large pages. */
1966 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1967 #if defined(DEBUG_TLB)
1968 printf("tlb_flush_page: forced full flush ("
1969 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1970 env->tlb_flush_addr, env->tlb_flush_mask);
1971 #endif
1972 tlb_flush(env, 1);
1973 return;
1974 }
1975 /* must reset current TB so that interrupts cannot modify the
1976 links while we are modifying them */
1977 env->current_tb = NULL;
1978
1979 addr &= TARGET_PAGE_MASK;
1980 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1981 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1982 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1983
1984 tlb_flush_jmp_cache(env, addr);
1985 }
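
/* Note (illustrative): the TLB is a direct-mapped, per-MMU-mode array of
   CPU_TLB_SIZE entries, so each page maps to exactly one slot per mode:

       i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

   tlb_flush_page() therefore only needs to clear that single slot in each
   mode (plus the tb_jmp_cache entries for the page), unless a large-page
   mapping forces the full tlb_flush() above. */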
1986
1987 /* update the TLBs so that writes to code in the virtual page 'addr'
1988 can be detected */
1989 static void tlb_protect_code(ram_addr_t ram_addr)
1990 {
1991 cpu_physical_memory_reset_dirty(ram_addr,
1992 ram_addr + TARGET_PAGE_SIZE,
1993 CODE_DIRTY_FLAG);
1994 }
1995
1996 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1997 tested for self modifying code */
1998 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1999 target_ulong vaddr)
2000 {
2001 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2002 }
2003
2004 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2005 unsigned long start, unsigned long length)
2006 {
2007 unsigned long addr;
2008 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2009 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2010 if ((addr - start) < length) {
2011 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2012 }
2013 }
2014 }
2015
2016 /* Note: start and end must be within the same ram block. */
2017 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2018 int dirty_flags)
2019 {
2020 CPUState *env;
2021 unsigned long length, start1;
2022 int i;
2023
2024 start &= TARGET_PAGE_MASK;
2025 end = TARGET_PAGE_ALIGN(end);
2026
2027 length = end - start;
2028 if (length == 0)
2029 return;
2030 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2031
2032 /* we modify the TLB cache so that the dirty bit will be set again
2033 when accessing the range */
2034 start1 = (unsigned long)qemu_safe_ram_ptr(start);
2035 /* Check that we don't span multiple blocks - this breaks the
2036 address comparisons below. */
2037 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2038 != (end - 1) - start) {
2039 abort();
2040 }
2041
2042 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2043 int mmu_idx;
2044 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2045 for(i = 0; i < CPU_TLB_SIZE; i++)
2046 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2047 start1, length);
2048 }
2049 }
2050 }
2051
2052 int cpu_physical_memory_set_dirty_tracking(int enable)
2053 {
2054 int ret = 0;
2055 in_migration = enable;
2056 return ret;
2057 }
2058
2059 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2060 {
2061 ram_addr_t ram_addr;
2062 void *p;
2063
2064 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2065 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2066 + tlb_entry->addend);
2067 ram_addr = qemu_ram_addr_from_host_nofail(p);
2068 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2069 tlb_entry->addr_write |= TLB_NOTDIRTY;
2070 }
2071 }
2072 }
2073
2074 /* update the TLB according to the current state of the dirty bits */
2075 void cpu_tlb_update_dirty(CPUState *env)
2076 {
2077 int i;
2078 int mmu_idx;
2079 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2080 for(i = 0; i < CPU_TLB_SIZE; i++)
2081 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2082 }
2083 }
2084
2085 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2086 {
2087 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2088 tlb_entry->addr_write = vaddr;
2089 }
2090
2091 /* update the TLB corresponding to virtual page vaddr
2092 so that it is no longer dirty */
2093 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2094 {
2095 int i;
2096 int mmu_idx;
2097
2098 vaddr &= TARGET_PAGE_MASK;
2099 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2100 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2101 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2102 }
2103
2104 /* Our TLB does not support large pages, so remember the area covered by
2105 large pages and trigger a full TLB flush if these are invalidated. */
2106 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2107 target_ulong size)
2108 {
2109 target_ulong mask = ~(size - 1);
2110
2111 if (env->tlb_flush_addr == (target_ulong)-1) {
2112 env->tlb_flush_addr = vaddr & mask;
2113 env->tlb_flush_mask = mask;
2114 return;
2115 }
2116 /* Extend the existing region to include the new page.
2117 This is a compromise between unnecessary flushes and the cost
2118 of maintaining a full variable size TLB. */
2119 mask &= env->tlb_flush_mask;
2120 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2121 mask <<= 1;
2122 }
2123 env->tlb_flush_addr &= mask;
2124 env->tlb_flush_mask = mask;
2125 }
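/* Worked example (illustrative, assuming a 32-bit target_ulong and 2MB
   large pages): with tlb_flush_addr = 0x00200000 and
   tlb_flush_mask = 0xffe00000 already recorded, adding another 2MB page
   at vaddr 0x00600000 makes the loop above widen the mask to 0xff800000,
   so the tracked region becomes 0x00000000-0x007fffff and any
   tlb_flush_page() inside it falls back to a full tlb_flush(). */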
2126
2127 static bool is_ram_rom(MemoryRegionSection *s)
2128 {
2129 return memory_region_is_ram(s->mr);
2130 }
2131
2132 static bool is_romd(MemoryRegionSection *s)
2133 {
2134 MemoryRegion *mr = s->mr;
2135
2136 return mr->rom_device && mr->readable;
2137 }
2138
2139 static bool is_ram_rom_romd(MemoryRegionSection *s)
2140 {
2141 return is_ram_rom(s) || is_romd(s);
2142 }
2143
2144 /* Add a new TLB entry. At most one entry for a given virtual address
2145 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2146 supplied size is only used by tlb_flush_page. */
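/* The entry built below falls into one of three cases: RAM and ROM pages
   get a direct host 'addend' plus an iotlb entry pointing at
   io_mem_notdirty (writable) or io_mem_rom (read-only); pure I/O pages
   are tagged TLB_MMIO so accesses go through the region's callbacks; and
   pages containing a watchpoint are redirected to io_mem_watch. ROM
   devices in ROMD mode are read directly but written via their
   callbacks. */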
2147 void tlb_set_page(CPUState *env, target_ulong vaddr,
2148 target_phys_addr_t paddr, int prot,
2149 int mmu_idx, target_ulong size)
2150 {
2151 MemoryRegionSection section;
2152 unsigned int index;
2153 target_ulong address;
2154 target_ulong code_address;
2155 unsigned long addend;
2156 CPUTLBEntry *te;
2157 CPUWatchpoint *wp;
2158 target_phys_addr_t iotlb;
2159
2160 assert(size >= TARGET_PAGE_SIZE);
2161 if (size != TARGET_PAGE_SIZE) {
2162 tlb_add_large_page(env, vaddr, size);
2163 }
2164 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
2165 #if defined(DEBUG_TLB)
2166 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2167 " prot=%x idx=%d pd=0x%08lx\n",
2168 vaddr, paddr, prot, mmu_idx, pd);
2169 #endif
2170
2171 address = vaddr;
2172 if (!is_ram_rom_romd(&section)) {
2173 /* IO memory case (romd handled later) */
2174 address |= TLB_MMIO;
2175 }
2176 if (is_ram_rom_romd(&section)) {
2177 addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
2178 + section.offset_within_region);
2179 } else {
2180 addend = 0;
2181 }
2182 if (is_ram_rom(&section)) {
2183 /* Normal RAM. */
2184 iotlb = (memory_region_get_ram_addr(section.mr)
2185 + section.offset_within_region) & TARGET_PAGE_MASK;
2186 if (!section.readonly)
2187 iotlb |= io_mem_notdirty.ram_addr;
2188 else
2189 iotlb |= io_mem_rom.ram_addr;
2190 } else {
2191 /* IO handlers are currently passed a physical address.
2192 It would be nice to pass an offset from the base address
2193 of that region. This would avoid having to special case RAM,
2194 and avoid full address decoding in every device.
2195 We can't use the high bits of pd for this because
2196 IO_MEM_ROMD uses these as a ram address. */
2197 iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
2198 iotlb += section.offset_within_region;
2199 }
2200
2201 code_address = address;
2202 /* Make accesses to pages with watchpoints go via the
2203 watchpoint trap routines. */
2204 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2205 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2206 /* Avoid trapping reads of pages with a write breakpoint. */
2207 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2208 iotlb = io_mem_watch.ram_addr + paddr;
2209 address |= TLB_MMIO;
2210 break;
2211 }
2212 }
2213 }
2214
2215 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2216 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2217 te = &env->tlb_table[mmu_idx][index];
2218 te->addend = addend - vaddr;
2219 if (prot & PAGE_READ) {
2220 te->addr_read = address;
2221 } else {
2222 te->addr_read = -1;
2223 }
2224
2225 if (prot & PAGE_EXEC) {
2226 te->addr_code = code_address;
2227 } else {
2228 te->addr_code = -1;
2229 }
2230 if (prot & PAGE_WRITE) {
2231 if ((memory_region_is_ram(section.mr) && section.readonly)
2232 || is_romd(&section)) {
2233 /* Write access calls the I/O callback. */
2234 te->addr_write = address | TLB_MMIO;
2235 } else if (memory_region_is_ram(section.mr)
2236 && !cpu_physical_memory_is_dirty(
2237 section.mr->ram_addr
2238 + section.offset_within_region)) {
2239 te->addr_write = address | TLB_NOTDIRTY;
2240 } else {
2241 te->addr_write = address;
2242 }
2243 } else {
2244 te->addr_write = -1;
2245 }
2246 }
2247
2248 #else
2249
2250 void tlb_flush(CPUState *env, int flush_global)
2251 {
2252 }
2253
2254 void tlb_flush_page(CPUState *env, target_ulong addr)
2255 {
2256 }
2257
2258 /*
2259 * Walks guest process memory "regions" one by one
2260 * and calls callback function 'fn' for each region.
2261 */
2262
2263 struct walk_memory_regions_data
2264 {
2265 walk_memory_regions_fn fn;
2266 void *priv;
2267 unsigned long start;
2268 int prot;
2269 };
2270
2271 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2272 abi_ulong end, int new_prot)
2273 {
2274 if (data->start != -1ul) {
2275 int rc = data->fn(data->priv, data->start, end, data->prot);
2276 if (rc != 0) {
2277 return rc;
2278 }
2279 }
2280
2281 data->start = (new_prot ? end : -1ul);
2282 data->prot = new_prot;
2283
2284 return 0;
2285 }
2286
2287 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2288 abi_ulong base, int level, void **lp)
2289 {
2290 abi_ulong pa;
2291 int i, rc;
2292
2293 if (*lp == NULL) {
2294 return walk_memory_regions_end(data, base, 0);
2295 }
2296
2297 if (level == 0) {
2298 PageDesc *pd = *lp;
2299 for (i = 0; i < L2_SIZE; ++i) {
2300 int prot = pd[i].flags;
2301
2302 pa = base | (i << TARGET_PAGE_BITS);
2303 if (prot != data->prot) {
2304 rc = walk_memory_regions_end(data, pa, prot);
2305 if (rc != 0) {
2306 return rc;
2307 }
2308 }
2309 }
2310 } else {
2311 void **pp = *lp;
2312 for (i = 0; i < L2_SIZE; ++i) {
2313 pa = base | ((abi_ulong)i <<
2314 (TARGET_PAGE_BITS + L2_BITS * level));
2315 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2316 if (rc != 0) {
2317 return rc;
2318 }
2319 }
2320 }
2321
2322 return 0;
2323 }
2324
2325 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2326 {
2327 struct walk_memory_regions_data data;
2328 unsigned long i;
2329
2330 data.fn = fn;
2331 data.priv = priv;
2332 data.start = -1ul;
2333 data.prot = 0;
2334
2335 for (i = 0; i < V_L1_SIZE; i++) {
2336 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2337 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2338 if (rc != 0) {
2339 return rc;
2340 }
2341 }
2342
2343 return walk_memory_regions_end(&data, 0, 0);
2344 }
2345
2346 static int dump_region(void *priv, abi_ulong start,
2347 abi_ulong end, unsigned long prot)
2348 {
2349 FILE *f = (FILE *)priv;
2350
2351 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2352 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2353 start, end, end - start,
2354 ((prot & PAGE_READ) ? 'r' : '-'),
2355 ((prot & PAGE_WRITE) ? 'w' : '-'),
2356 ((prot & PAGE_EXEC) ? 'x' : '-'));
2357
2358 return (0);
2359 }
2360
2361 /* dump memory mappings */
2362 void page_dump(FILE *f)
2363 {
2364 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2365 "start", "end", "size", "prot");
2366 walk_memory_regions(f, dump_region);
2367 }
2368
2369 int page_get_flags(target_ulong address)
2370 {
2371 PageDesc *p;
2372
2373 p = page_find(address >> TARGET_PAGE_BITS);
2374 if (!p)
2375 return 0;
2376 return p->flags;
2377 }
2378
2379 /* Modify the flags of a page and invalidate the code if necessary.
2380 The flag PAGE_WRITE_ORG is positioned automatically depending
2381 on PAGE_WRITE. The mmap_lock should already be held. */
2382 void page_set_flags(target_ulong start, target_ulong end, int flags)
2383 {
2384 target_ulong addr, len;
2385
2386 /* This function should never be called with addresses outside the
2387 guest address space. If this assert fires, it probably indicates
2388 a missing call to h2g_valid. */
2389 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2390 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2391 #endif
2392 assert(start < end);
2393
2394 start = start & TARGET_PAGE_MASK;
2395 end = TARGET_PAGE_ALIGN(end);
2396
2397 if (flags & PAGE_WRITE) {
2398 flags |= PAGE_WRITE_ORG;
2399 }
2400
2401 for (addr = start, len = end - start;
2402 len != 0;
2403 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2404 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2405
2406 /* If the write protection bit is set, then we invalidate
2407 the code inside. */
2408 if (!(p->flags & PAGE_WRITE) &&
2409 (flags & PAGE_WRITE) &&
2410 p->first_tb) {
2411 tb_invalidate_phys_page(addr, 0, NULL);
2412 }
2413 p->flags = flags;
2414 }
2415 }
2416
2417 int page_check_range(target_ulong start, target_ulong len, int flags)
2418 {
2419 PageDesc *p;
2420 target_ulong end;
2421 target_ulong addr;
2422
2423 /* This function should never be called with addresses outside the
2424 guest address space. If this assert fires, it probably indicates
2425 a missing call to h2g_valid. */
2426 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2427 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2428 #endif
2429
2430 if (len == 0) {
2431 return 0;
2432 }
2433 if (start + len - 1 < start) {
2434 /* We've wrapped around. */
2435 return -1;
2436 }
2437
2438 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2439 start = start & TARGET_PAGE_MASK;
2440
2441 for (addr = start, len = end - start;
2442 len != 0;
2443 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2444 p = page_find(addr >> TARGET_PAGE_BITS);
2445 if (!p)
2446 return -1;
2447 if (!(p->flags & PAGE_VALID))
2448 return -1;
2449
2450 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2451 return -1;
2452 if (flags & PAGE_WRITE) {
2453 if (!(p->flags & PAGE_WRITE_ORG))
2454 return -1;
2455 /* unprotect the page if it was put read-only because it
2456 contains translated code */
2457 if (!(p->flags & PAGE_WRITE)) {
2458 if (!page_unprotect(addr, 0, NULL))
2459 return -1;
2460 }
2461 return 0;
2462 }
2463 }
2464 return 0;
2465 }
2466
2467 /* called from signal handler: invalidate the code and unprotect the
2468 page. Return TRUE if the fault was successfully handled. */
2469 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2470 {
2471 unsigned int prot;
2472 PageDesc *p;
2473 target_ulong host_start, host_end, addr;
2474
2475 /* Technically this isn't safe inside a signal handler. However we
2476 know this only ever happens in a synchronous SEGV handler, so in
2477 practice it seems to be ok. */
2478 mmap_lock();
2479
2480 p = page_find(address >> TARGET_PAGE_BITS);
2481 if (!p) {
2482 mmap_unlock();
2483 return 0;
2484 }
2485
2486 /* if the page was really writable, then we change its
2487 protection back to writable */
2488 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2489 host_start = address & qemu_host_page_mask;
2490 host_end = host_start + qemu_host_page_size;
2491
2492 prot = 0;
2493 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2494 p = page_find(addr >> TARGET_PAGE_BITS);
2495 p->flags |= PAGE_WRITE;
2496 prot |= p->flags;
2497
2498 /* and since the content will be modified, we must invalidate
2499 the corresponding translated code. */
2500 tb_invalidate_phys_page(addr, pc, puc);
2501 #ifdef DEBUG_TB_CHECK
2502 tb_invalidate_check(addr);
2503 #endif
2504 }
2505 mprotect((void *)g2h(host_start), qemu_host_page_size,
2506 prot & PAGE_BITS);
2507
2508 mmap_unlock();
2509 return 1;
2510 }
2511 mmap_unlock();
2512 return 0;
2513 }
2514
2515 static inline void tlb_set_dirty(CPUState *env,
2516 unsigned long addr, target_ulong vaddr)
2517 {
2518 }
2519 #endif /* defined(CONFIG_USER_ONLY) */
2520
2521 #if !defined(CONFIG_USER_ONLY)
2522
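/* Subpages allow a single target page to be backed by more than one
   MemoryRegionSection: each TARGET_PAGE_SIZE page that is only partially
   covered by a section gets a subpage_t whose sub_section[] table maps
   every byte offset within the page to the section that handles it. */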
2523 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2524 typedef struct subpage_t {
2525 MemoryRegion iomem;
2526 target_phys_addr_t base;
2527 uint16_t sub_section[TARGET_PAGE_SIZE];
2528 } subpage_t;
2529
2530 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2531 uint16_t section);
2532 static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
2533 uint16_t orig_section);
2534 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2535 need_subpage) \
2536 do { \
2537 if (addr > start_addr) \
2538 start_addr2 = 0; \
2539 else { \
2540 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2541 if (start_addr2 > 0) \
2542 need_subpage = 1; \
2543 } \
2544 \
2545 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2546 end_addr2 = TARGET_PAGE_SIZE - 1; \
2547 else { \
2548 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2549 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2550 need_subpage = 1; \
2551 } \
2552 } while (0)
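/* Example (illustrative, assuming 4KB target pages): registering a
   section of size 0x300 at start_addr 0x1100, the page at addr 0x1000
   gets start_addr2 = 0x100 and end_addr2 = 0x3ff, so need_subpage is set
   and only offsets 0x100-0x3ff of that page are routed to the new
   section. */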
2553
2554 static void destroy_page_desc(uint16_t section_index)
2555 {
2556 MemoryRegionSection *section = &phys_sections[section_index];
2557 MemoryRegion *mr = section->mr;
2558
2559 if (mr->subpage) {
2560 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2561 memory_region_destroy(&subpage->iomem);
2562 g_free(subpage);
2563 }
2564 }
2565
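/* Recursively walk one level of the physical page map, destroying any
   subpage containers referenced by the leaves, and then mark this node
   as empty so the node pool can be rebuilt from scratch. */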
2566 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
2567 {
2568 unsigned i;
2569 PhysPageEntry *p;
2570
2571 if (lp->u.node == PHYS_MAP_NODE_NIL) {
2572 return;
2573 }
2574
2575 p = phys_map_nodes[lp->u.node];
2576 for (i = 0; i < L2_SIZE; ++i) {
2577 if (level > 0) {
2578 destroy_l2_mapping(&p[i], level - 1);
2579 } else {
2580 destroy_page_desc(p[i].u.leaf);
2581 }
2582 }
2583 lp->u.node = PHYS_MAP_NODE_NIL;
2584 }
2585
2586 static void destroy_all_mappings(void)
2587 {
2588 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
2589 phys_map_nodes_reset();
2590 }
2591
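/* Append a copy of *section to the phys_sections array (growing it as
   needed) and return its index; the index is what gets stored in the
   leaves of the physical page map. */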
2592 static uint16_t phys_section_add(MemoryRegionSection *section)
2593 {
2594 if (phys_sections_nb == phys_sections_nb_alloc) {
2595 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2596 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2597 phys_sections_nb_alloc);
2598 }
2599 phys_sections[phys_sections_nb] = *section;
2600 return phys_sections_nb++;
2601 }
2602
2603 static void phys_sections_clear(void)
2604 {
2605 phys_sections_nb = 0;
2606 }
2607
2608 /* Register the memory range described by a MemoryRegionSection in the
2609 physical page map.
2610 The section's offset_within_address_space and size determine the
2611 range; it is split into target pages, and any page that is only
2612 partially covered by the section is handled by installing a subpage,
2613 which dispatches sub-page accesses to the section that owns each
2614 offset. Fully covered pages simply record the section index in the
2615 page map. */
2616 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2617 bool readonly)
2618 {
2619 target_phys_addr_t start_addr = section->offset_within_address_space;
2620 ram_addr_t size = section->size;
2621 target_phys_addr_t addr, end_addr;
2622 ram_addr_t orig_size = size;
2623 subpage_t *subpage;
2624 uint16_t section_index = phys_section_add(section);
2625
2626 assert(size);
2627
2628 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2629 end_addr = start_addr + (target_phys_addr_t)size;
2630
2631 addr = start_addr;
2632 do {
2633 uint16_t *p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2634 uint16_t orig_memory = *p;
2635 target_phys_addr_t start_addr2, end_addr2;
2636 int need_subpage = 0;
2637 MemoryRegion *mr = phys_sections[orig_memory].mr;
2638
2639 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2640 need_subpage);
2641 if (need_subpage) {
2642 if (!(mr->subpage)) {
2643 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2644 p, orig_memory);
2645 } else {
2646 subpage = container_of(mr, subpage_t, iomem);
2647 }
2648 subpage_register(subpage, start_addr2, end_addr2,
2649 section_index);
2650 } else {
2651 *p = section_index;
2652 }
2653 addr += TARGET_PAGE_SIZE;
2654 } while (addr != end_addr);
2655 }
2656
2657 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2658 {
2659 if (kvm_enabled())
2660 kvm_coalesce_mmio_region(addr, size);
2661 }
2662
2663 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2664 {
2665 if (kvm_enabled())
2666 kvm_uncoalesce_mmio_region(addr, size);
2667 }
2668
2669 void qemu_flush_coalesced_mmio_buffer(void)
2670 {
2671 if (kvm_enabled())
2672 kvm_flush_coalesced_mmio_buffer();
2673 }
2674
2675 #if defined(__linux__) && !defined(TARGET_S390X)
2676
2677 #include <sys/vfs.h>
2678
2679 #define HUGETLBFS_MAGIC 0x958458f6
2680
2681 static long gethugepagesize(const char *path)
2682 {
2683 struct statfs fs;
2684 int ret;
2685
2686 do {
2687 ret = statfs(path, &fs);
2688 } while (ret != 0 && errno == EINTR);
2689
2690 if (ret != 0) {
2691 perror(path);
2692 return 0;
2693 }
2694
2695 if (fs.f_type != HUGETLBFS_MAGIC)
2696 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2697
2698 return fs.f_bsize;
2699 }
2700
2701 static void *file_ram_alloc(RAMBlock *block,
2702 ram_addr_t memory,
2703 const char *path)
2704 {
2705 char *filename;
2706 void *area;
2707 int fd;
2708 #ifdef MAP_POPULATE
2709 int flags;
2710 #endif
2711 unsigned long hpagesize;
2712
2713 hpagesize = gethugepagesize(path);
2714 if (!hpagesize) {
2715 return NULL;
2716 }
2717
2718 if (memory < hpagesize) {
2719 return NULL;
2720 }
2721
2722 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2723 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2724 return NULL;
2725 }
2726
2727 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2728 return NULL;
2729 }
2730
2731 fd = mkstemp(filename);
2732 if (fd < 0) {
2733 perror("unable to create backing store for hugepages");
2734 free(filename);
2735 return NULL;
2736 }
2737 unlink(filename);
2738 free(filename);
2739
2740 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2741
2742 /*
2743 * ftruncate is not supported by hugetlbfs in older
2744 * hosts, so don't bother bailing out on errors.
2745 * If anything goes wrong with it under other filesystems,
2746 * mmap will fail.
2747 */
2748 if (ftruncate(fd, memory))
2749 perror("ftruncate");
2750
2751 #ifdef MAP_POPULATE
2752 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2753 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2754 * to sidestep this quirk.
2755 */
2756 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2757 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2758 #else
2759 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2760 #endif
2761 if (area == MAP_FAILED) {
2762 perror("file_ram_alloc: can't mmap RAM pages");
2763 close(fd);
2764 return (NULL);
2765 }
2766 block->fd = fd;
2767 return area;
2768 }
2769 #endif
2770
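/* Pick a ram_addr_t offset for a new block of the given size by searching
   the existing RAM blocks for the smallest gap that can hold it; aborts
   if no gap is large enough. */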
2771 static ram_addr_t find_ram_offset(ram_addr_t size)
2772 {
2773 RAMBlock *block, *next_block;
2774 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2775
2776 if (QLIST_EMPTY(&ram_list.blocks))
2777 return 0;
2778
2779 QLIST_FOREACH(block, &ram_list.blocks, next) {
2780 ram_addr_t end, next = RAM_ADDR_MAX;
2781
2782 end = block->offset + block->length;
2783
2784 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2785 if (next_block->offset >= end) {
2786 next = MIN(next, next_block->offset);
2787 }
2788 }
2789 if (next - end >= size && next - end < mingap) {
2790 offset = end;
2791 mingap = next - end;
2792 }
2793 }
2794
2795 if (offset == RAM_ADDR_MAX) {
2796 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2797 (uint64_t)size);
2798 abort();
2799 }
2800
2801 return offset;
2802 }
2803
2804 static ram_addr_t last_ram_offset(void)
2805 {
2806 RAMBlock *block;
2807 ram_addr_t last = 0;
2808
2809 QLIST_FOREACH(block, &ram_list.blocks, next)
2810 last = MAX(last, block->offset + block->length);
2811
2812 return last;
2813 }
2814
2815 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2816 {
2817 RAMBlock *new_block, *block;
2818
2819 new_block = NULL;
2820 QLIST_FOREACH(block, &ram_list.blocks, next) {
2821 if (block->offset == addr) {
2822 new_block = block;
2823 break;
2824 }
2825 }
2826 assert(new_block);
2827 assert(!new_block->idstr[0]);
2828
2829 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2830 char *id = dev->parent_bus->info->get_dev_path(dev);
2831 if (id) {
2832 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2833 g_free(id);
2834 }
2835 }
2836 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2837
2838 QLIST_FOREACH(block, &ram_list.blocks, next) {
2839 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2840 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2841 new_block->idstr);
2842 abort();
2843 }
2844 }
2845 }
2846
2847 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2848 MemoryRegion *mr)
2849 {
2850 RAMBlock *new_block;
2851
2852 size = TARGET_PAGE_ALIGN(size);
2853 new_block = g_malloc0(sizeof(*new_block));
2854
2855 new_block->mr = mr;
2856 new_block->offset = find_ram_offset(size);
2857 if (host) {
2858 new_block->host = host;
2859 new_block->flags |= RAM_PREALLOC_MASK;
2860 } else {
2861 if (mem_path) {
2862 #if defined (__linux__) && !defined(TARGET_S390X)
2863 new_block->host = file_ram_alloc(new_block, size, mem_path);
2864 if (!new_block->host) {
2865 new_block->host = qemu_vmalloc(size);
2866 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2867 }
2868 #else
2869 fprintf(stderr, "-mem-path option unsupported\n");
2870 exit(1);
2871 #endif
2872 } else {
2873 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2874 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2875 a system-defined value, which is at least 256GB. Larger systems
2876 have larger values. We put the guest between the end of the data
2877 segment (system break) and this value. We use 32GB as a base to
2878 have enough room for the system break to grow. */
2879 new_block->host = mmap((void*)0x800000000, size,
2880 PROT_EXEC|PROT_READ|PROT_WRITE,
2881 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2882 if (new_block->host == MAP_FAILED) {
2883 fprintf(stderr, "Allocating RAM failed\n");
2884 abort();
2885 }
2886 #else
2887 if (xen_enabled()) {
2888 xen_ram_alloc(new_block->offset, size, mr);
2889 } else {
2890 new_block->host = qemu_vmalloc(size);
2891 }
2892 #endif
2893 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2894 }
2895 }
2896 new_block->length = size;
2897
2898 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2899
2900 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2901 last_ram_offset() >> TARGET_PAGE_BITS);
2902 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2903 0xff, size >> TARGET_PAGE_BITS);
2904
2905 if (kvm_enabled())
2906 kvm_setup_guest_memory(new_block->host, size);
2907
2908 return new_block->offset;
2909 }
2910
2911 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2912 {
2913 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2914 }
2915
2916 void qemu_ram_free_from_ptr(ram_addr_t addr)
2917 {
2918 RAMBlock *block;
2919
2920 QLIST_FOREACH(block, &ram_list.blocks, next) {
2921 if (addr == block->offset) {
2922 QLIST_REMOVE(block, next);
2923 g_free(block);
2924 return;
2925 }
2926 }
2927 }
2928
2929 void qemu_ram_free(ram_addr_t addr)
2930 {
2931 RAMBlock *block;
2932
2933 QLIST_FOREACH(block, &ram_list.blocks, next) {
2934 if (addr == block->offset) {
2935 QLIST_REMOVE(block, next);
2936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else if (mem_path) {
2939 #if defined (__linux__) && !defined(TARGET_S390X)
2940 if (block->fd) {
2941 munmap(block->host, block->length);
2942 close(block->fd);
2943 } else {
2944 qemu_vfree(block->host);
2945 }
2946 #else
2947 abort();
2948 #endif
2949 } else {
2950 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2951 munmap(block->host, block->length);
2952 #else
2953 if (xen_enabled()) {
2954 xen_invalidate_map_cache_entry(block->host);
2955 } else {
2956 qemu_vfree(block->host);
2957 }
2958 #endif
2959 }
2960 g_free(block);
2961 return;
2962 }
2963 }
2964
2965 }
2966
2967 #ifndef _WIN32
2968 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2969 {
2970 RAMBlock *block;
2971 ram_addr_t offset;
2972 int flags;
2973 void *area, *vaddr;
2974
2975 QLIST_FOREACH(block, &ram_list.blocks, next) {
2976 offset = addr - block->offset;
2977 if (offset < block->length) {
2978 vaddr = block->host + offset;
2979 if (block->flags & RAM_PREALLOC_MASK) {
2980 ;
2981 } else {
2982 flags = MAP_FIXED;
2983 munmap(vaddr, length);
2984 if (mem_path) {
2985 #if defined(__linux__) && !defined(TARGET_S390X)
2986 if (block->fd) {
2987 #ifdef MAP_POPULATE
2988 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2989 MAP_PRIVATE;
2990 #else
2991 flags |= MAP_PRIVATE;
2992 #endif
2993 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2994 flags, block->fd, offset);
2995 } else {
2996 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2997 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2998 flags, -1, 0);
2999 }
3000 #else
3001 abort();
3002 #endif
3003 } else {
3004 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3005 flags |= MAP_SHARED | MAP_ANONYMOUS;
3006 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3007 flags, -1, 0);
3008 #else
3009 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3010 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3011 flags, -1, 0);
3012 #endif
3013 }
3014 if (area != vaddr) {
3015 fprintf(stderr, "Could not remap addr: "
3016 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
3017 length, addr);
3018 exit(1);
3019 }
3020 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3021 }
3022 return;
3023 }
3024 }
3025 }
3026 #endif /* !_WIN32 */
3027
3028 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3029 With the exception of the softmmu code in this file, this should
3030 only be used for local memory (e.g. video ram) that the device owns,
3031 and knows it isn't going to access beyond the end of the block.
3032
3033 It should not be used for general purpose DMA.
3034 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3035 */
3036 void *qemu_get_ram_ptr(ram_addr_t addr)
3037 {
3038 RAMBlock *block;
3039
3040 QLIST_FOREACH(block, &ram_list.blocks, next) {
3041 if (addr - block->offset < block->length) {
3042 /* Move this entry to the start of the list. */
3043 if (block != QLIST_FIRST(&ram_list.blocks)) {
3044 QLIST_REMOVE(block, next);
3045 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3046 }
3047 if (xen_enabled()) {
3048 /* We need to check if the requested address is in the RAM
3049 * because we don't want to map the entire memory in QEMU.
3050 * In that case just map until the end of the page.
3051 */
3052 if (block->offset == 0) {
3053 return xen_map_cache(addr, 0, 0);
3054 } else if (block->host == NULL) {
3055 block->host =
3056 xen_map_cache(block->offset, block->length, 1);
3057 }
3058 }
3059 return block->host + (addr - block->offset);
3060 }
3061 }
3062
3063 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3064 abort();
3065
3066 return NULL;
3067 }
3068
3069 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3070 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3071 */
3072 void *qemu_safe_ram_ptr(ram_addr_t addr)
3073 {
3074 RAMBlock *block;
3075
3076 QLIST_FOREACH(block, &ram_list.blocks, next) {
3077 if (addr - block->offset < block->length) {
3078 if (xen_enabled()) {
3079 /* We need to check if the requested address is in the RAM
3080 * because we don't want to map the entire memory in QEMU.
3081 * In that case just map until the end of the page.
3082 */
3083 if (block->offset == 0) {
3084 return xen_map_cache(addr, 0, 0);
3085 } else if (block->host == NULL) {
3086 block->host =
3087 xen_map_cache(block->offset, block->length, 1);
3088 }
3089 }
3090 return block->host + (addr - block->offset);
3091 }
3092 }
3093
3094 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3095 abort();
3096
3097 return NULL;
3098 }
3099
3100 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3101 * but takes a size argument */
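/* In the non-Xen case, *size is clamped so that the returned pointer
   never crosses the end of the RAM block containing 'addr'. */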
3102 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3103 {
3104 if (*size == 0) {
3105 return NULL;
3106 }
3107 if (xen_enabled()) {
3108 return xen_map_cache(addr, *size, 1);
3109 } else {
3110 RAMBlock *block;
3111
3112 QLIST_FOREACH(block, &ram_list.blocks, next) {
3113 if (addr - block->offset < block->length) {
3114 if (addr - block->offset + *size > block->length)
3115 *size = block->length - addr + block->offset;
3116 return block->host + (addr - block->offset);
3117 }
3118 }
3119
3120 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3121 abort();
3122 }
3123 }
3124
3125 void qemu_put_ram_ptr(void *addr)
3126 {
3127 trace_qemu_put_ram_ptr(addr);
3128 }
3129
3130 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3131 {
3132 RAMBlock *block;
3133 uint8_t *host = ptr;
3134
3135 if (xen_enabled()) {
3136 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3137 return 0;
3138 }
3139
3140 QLIST_FOREACH(block, &ram_list.blocks, next) {
3141 /* This case happens when the block is not mapped. */
3142 if (block->host == NULL) {
3143 continue;
3144 }
3145 if (host - block->host < block->length) {
3146 *ram_addr = block->offset + (host - block->host);
3147 return 0;
3148 }
3149 }
3150
3151 return -1;
3152 }
3153
3154 /* Some of the softmmu routines need to translate from a host pointer
3155 (typically a TLB entry) back to a ram offset. */
3156 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3157 {
3158 ram_addr_t ram_addr;
3159
3160 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3161 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3162 abort();
3163 }
3164 return ram_addr;
3165 }
3166
3167 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3168 unsigned size)
3169 {
3170 #ifdef DEBUG_UNASSIGNED
3171 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3172 #endif
3173 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3174 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3175 #endif
3176 return 0;
3177 }
3178
3179 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3180 uint64_t val, unsigned size)
3181 {
3182 #ifdef DEBUG_UNASSIGNED
3183 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3184 #endif
3185 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3186 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3187 #endif
3188 }
3189
3190 static const MemoryRegionOps unassigned_mem_ops = {
3191 .read = unassigned_mem_read,
3192 .write = unassigned_mem_write,
3193 .endianness = DEVICE_NATIVE_ENDIAN,
3194 };
3195
3196 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3197 unsigned size)
3198 {
3199 abort();
3200 }
3201
3202 static void error_mem_write(void *opaque, target_phys_addr_t addr,
3203 uint64_t value, unsigned size)
3204 {
3205 abort();
3206 }
3207
3208 static const MemoryRegionOps error_mem_ops = {
3209 .read = error_mem_read,
3210 .write = error_mem_write,
3211 .endianness = DEVICE_NATIVE_ENDIAN,
3212 };
3213
3214 static const MemoryRegionOps rom_mem_ops = {
3215 .read = error_mem_read,
3216 .write = unassigned_mem_write,
3217 .endianness = DEVICE_NATIVE_ENDIAN,
3218 };
3219
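/* Write handler reached through TLB_NOTDIRTY entries, i.e. for RAM pages
   whose dirty flags are not all set (typically because they contain
   translated code). It invalidates any affected TBs, performs the store
   on the underlying RAM, updates the dirty flags, and re-arms the fast
   path once the page is fully dirty. */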
3220 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3221 uint64_t val, unsigned size)
3222 {
3223 int dirty_flags;
3224 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3226 #if !defined(CONFIG_USER_ONLY)
3227 tb_invalidate_phys_page_fast(ram_addr, size);
3228 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3229 #endif
3230 }
3231 switch (size) {
3232 case 1:
3233 stb_p(qemu_get_ram_ptr(ram_addr), val);
3234 break;
3235 case 2:
3236 stw_p(qemu_get_ram_ptr(ram_addr), val);
3237 break;
3238 case 4:
3239 stl_p(qemu_get_ram_ptr(ram_addr), val);
3240 break;
3241 default:
3242 abort();
3243 }
3244 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3245 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3246 /* we remove the notdirty callback only if the code has been
3247 flushed */
3248 if (dirty_flags == 0xff)
3249 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3250 }
3251
3252 static const MemoryRegionOps notdirty_mem_ops = {
3253 .read = error_mem_read,
3254 .write = notdirty_mem_write,
3255 .endianness = DEVICE_NATIVE_ENDIAN,
3256 };
3257
3258 /* Generate a debug exception if a watchpoint has been hit. */
3259 static void check_watchpoint(int offset, int len_mask, int flags)
3260 {
3261 CPUState *env = cpu_single_env;
3262 target_ulong pc, cs_base;
3263 TranslationBlock *tb;
3264 target_ulong vaddr;
3265 CPUWatchpoint *wp;
3266 int cpu_flags;
3267
3268 if (env->watchpoint_hit) {
3269 /* We re-entered the check after replacing the TB. Now raise
3270 * the debug interrupt so that it will trigger after the
3271 * current instruction. */
3272 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3273 return;
3274 }
3275 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3276 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3277 if ((vaddr == (wp->vaddr & len_mask) ||
3278 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3279 wp->flags |= BP_WATCHPOINT_HIT;
3280 if (!env->watchpoint_hit) {
3281 env->watchpoint_hit = wp;
3282 tb = tb_find_pc(env->mem_io_pc);
3283 if (!tb) {
3284 cpu_abort(env, "check_watchpoint: could not find TB for "
3285 "pc=%p", (void *)env->mem_io_pc);
3286 }
3287 cpu_restore_state(tb, env, env->mem_io_pc);
3288 tb_phys_invalidate(tb, -1);
3289 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3290 env->exception_index = EXCP_DEBUG;
3291 } else {
3292 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3293 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3294 }
3295 cpu_resume_from_signal(env, NULL);
3296 }
3297 } else {
3298 wp->flags &= ~BP_WATCHPOINT_HIT;
3299 }
3300 }
3301 }
3302
3303 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3304 so these check for a hit then pass through to the normal out-of-line
3305 phys routines. */
3306 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3307 unsigned size)
3308 {
3309 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3310 switch (size) {
3311 case 1: return ldub_phys(addr);
3312 case 2: return lduw_phys(addr);
3313 case 4: return ldl_phys(addr);
3314 default: abort();
3315 }
3316 }
3317
3318 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3319 uint64_t val, unsigned size)
3320 {
3321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3322 switch (size) {
3323 case 1: stb_phys(addr, val); break;
3324 case 2: stw_phys(addr, val); break;
3325 case 4: stl_phys(addr, val); break;
3326 default: abort();
3327 }
3328 }
3329
3330 static const MemoryRegionOps watch_mem_ops = {
3331 .read = watch_mem_read,
3332 .write = watch_mem_write,
3333 .endianness = DEVICE_NATIVE_ENDIAN,
3334 };
3335
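/* Subpage accessors: look up the section registered for this offset
   within the page and forward the access to it, rebasing the address
   from the subpage's base into the target section's own offset space. */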
3336 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3337 unsigned len)
3338 {
3339 subpage_t *mmio = opaque;
3340 unsigned int idx = SUBPAGE_IDX(addr);
3341 MemoryRegionSection *section;
3342 #if defined(DEBUG_SUBPAGE)
3343 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3344 mmio, len, addr, idx);
3345 #endif
3346
3347 section = &phys_sections[mmio->sub_section[idx]];
3348 addr += mmio->base;
3349 addr -= section->offset_within_address_space;
3350 addr += section->offset_within_region;
3351 return io_mem_read(section->mr->ram_addr, addr, len);
3352 }
3353
3354 static void subpage_write(void *opaque, target_phys_addr_t addr,
3355 uint64_t value, unsigned len)
3356 {
3357 subpage_t *mmio = opaque;
3358 unsigned int idx = SUBPAGE_IDX(addr);
3359 MemoryRegionSection *section;
3360 #if defined(DEBUG_SUBPAGE)
3361 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3362 " idx %d value %"PRIx64"\n",
3363 __func__, mmio, len, addr, idx, value);
3364 #endif
3365
3366 section = &phys_sections[mmio->sub_section[idx]];
3367 addr += mmio->base;
3368 addr -= section->offset_within_address_space;
3369 addr += section->offset_within_region;
3370 io_mem_write(section->mr->ram_addr, addr, value, len);
3371 }
3372
3373 static const MemoryRegionOps subpage_ops = {
3374 .read = subpage_read,
3375 .write = subpage_write,
3376 .endianness = DEVICE_NATIVE_ENDIAN,
3377 };
3378
3379 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3380 unsigned size)
3381 {
3382 ram_addr_t raddr = addr;
3383 void *ptr = qemu_get_ram_ptr(raddr);
3384 switch (size) {
3385 case 1: return ldub_p(ptr);
3386 case 2: return lduw_p(ptr);
3387 case 4: return ldl_p(ptr);
3388 default: abort();
3389 }
3390 }
3391
3392 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3393 uint64_t value, unsigned size)
3394 {
3395 ram_addr_t raddr = addr;
3396 void *ptr = qemu_get_ram_ptr(raddr);
3397 switch (size) {
3398 case 1: return stb_p(ptr, value);
3399 case 2: return stw_p(ptr, value);
3400 case 4: return stl_p(ptr, value);
3401 default: abort();
3402 }
3403 }
3404
3405 static const MemoryRegionOps subpage_ram_ops = {
3406 .read = subpage_ram_read,
3407 .write = subpage_ram_write,
3408 .endianness = DEVICE_NATIVE_ENDIAN,
3409 };
3410
3411 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3412 uint16_t section)
3413 {
3414 int idx, eidx;
3415
3416 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3417 return -1;
3418 idx = SUBPAGE_IDX(start);
3419 eidx = SUBPAGE_IDX(end);
3420 #if defined(DEBUG_SUBPAGE)
3421 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3422 mmio, start, end, idx, eidx, memory);
3423 #endif
3424 if (memory_region_is_ram(phys_sections[section].mr)) {
3425 MemoryRegionSection new_section = phys_sections[section];
3426 new_section.mr = &io_mem_subpage_ram;
3427 section = phys_section_add(&new_section);
3428 }
3429 for (; idx <= eidx; idx++) {
3430 mmio->sub_section[idx] = section;
3431 }
3432
3433 return 0;
3434 }
3435
3436 static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
3437 uint16_t orig_section)
3438 {
3439 subpage_t *mmio;
3440 MemoryRegionSection section = {
3441 .offset_within_address_space = base,
3442 .size = TARGET_PAGE_SIZE,
3443 };
3444
3445 mmio = g_malloc0(sizeof(subpage_t));
3446
3447 mmio->base = base;
3448 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3449 "subpage", TARGET_PAGE_SIZE);
3450 mmio->iomem.subpage = true;
3451 section.mr = &mmio->iomem;
3452 #if defined(DEBUG_SUBPAGE)
3453 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3454 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3455 #endif
3456 *section_ind = phys_section_add(&section);
3457 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);
3458
3459 return mmio;
3460 }
3461
3462 static int get_free_io_mem_idx(void)
3463 {
3464 int i;
3465
3466 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3467 if (!io_mem_used[i]) {
3468 io_mem_used[i] = 1;
3469 return i;
3470 }
3471 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3472 return -1;
3473 }
3474
3475 /* Register 'mr' in the io_mem_region table.
3476 If io_index is greater than zero, the existing slot with that index
3477 is replaced (it must be below IO_MEM_NB_ENTRIES). If io_index is
3478 zero or negative, a free slot is allocated instead.
3479 Returns the io index that was used, which identifies the region for
3480 io_mem_read()/io_mem_write(); (-1) is returned if no free slot is
3481 available or io_index is out of range. */
3482 static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3483 {
3484 if (io_index <= 0) {
3485 io_index = get_free_io_mem_idx();
3486 if (io_index == -1)
3487 return io_index;
3488 } else {
3489 if (io_index >= IO_MEM_NB_ENTRIES)
3490 return -1;
3491 }
3492
3493 io_mem_region[io_index] = mr;
3494
3495 return io_index;
3496 }
3497
3498 int cpu_register_io_memory(MemoryRegion *mr)
3499 {
3500 return cpu_register_io_memory_fixed(0, mr);
3501 }
3502
3503 void cpu_unregister_io_memory(int io_index)
3504 {
3505 io_mem_region[io_index] = NULL;
3506 io_mem_used[io_index] = 0;
3507 }
3508
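/* Create a MemoryRegionSection spanning the whole address space for 'mr'
   and add it to phys_sections; used to give unassigned memory a section
   index before any real sections are registered. */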
3509 static uint16_t dummy_section(MemoryRegion *mr)
3510 {
3511 MemoryRegionSection section = {
3512 .mr = mr,
3513 .offset_within_address_space = 0,
3514 .offset_within_region = 0,
3515 .size = UINT64_MAX,
3516 };
3517
3518 return phys_section_add(&section);
3519 }
3520
3521 static void io_mem_init(void)
3522 {
3523 int i;
3524
3525 /* Must be first: */
3526 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3527 assert(io_mem_ram.ram_addr == 0);
3528 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3529 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3530 "unassigned", UINT64_MAX);
3531 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3532 "notdirty", UINT64_MAX);
3533 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3534 "subpage-ram", UINT64_MAX);
3535 for (i=0; i<5; i++)
3536 io_mem_used[i] = 1;
3537
3538 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3539 "watch", UINT64_MAX);
3540 }
3541
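/* The core listener rebuilds the physical page map on every memory
   topology change: core_begin() discards all existing mappings and
   sections, the region_add/region_nop callbacks re-register each
   section, and core_commit() flushes every CPU's TLB so that no stale
   ram addresses survive the rebuild. */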
3542 static void core_begin(MemoryListener *listener)
3543 {
3544 destroy_all_mappings();
3545 phys_sections_clear();
3546 phys_map.u.node = PHYS_MAP_NODE_NIL;
3547 phys_section_unassigned = dummy_section(&io_mem_unassigned);
3548 }
3549
3550 static void core_commit(MemoryListener *listener)
3551 {
3552 CPUState *env;
3553
3554 /* since each CPU stores ram addresses in its TLB cache, we must
3555 reset the modified entries */
3556 /* XXX: slow ! */
3557 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3558 tlb_flush(env, 1);
3559 }
3560 }
3561
3562 static void core_region_add(MemoryListener *listener,
3563 MemoryRegionSection *section)
3564 {
3565 cpu_register_physical_memory_log(section, section->readonly);
3566 }
3567
3568 static void core_region_del(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570 {
3571 }
3572
3573 static void core_region_nop(MemoryListener *listener,
3574 MemoryRegionSection *section)
3575 {
3576 cpu_register_physical_memory_log(section, section->readonly);
3577 }
3578
3579 static void core_log_start(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581 {
3582 }
3583
3584 static void core_log_stop(MemoryListener *listener,
3585 MemoryRegionSection *section)
3586 {
3587 }
3588
3589 static void core_log_sync(MemoryListener *listener,
3590 MemoryRegionSection *section)
3591 {
3592 }
3593
3594 static void core_log_global_start(MemoryListener *listener)
3595 {
3596 cpu_physical_memory_set_dirty_tracking(1);
3597 }
3598
3599 static void core_log_global_stop(MemoryListener *listener)
3600 {
3601 cpu_physical_memory_set_dirty_tracking(0);
3602 }
3603
3604 static void core_eventfd_add(MemoryListener *listener,
3605 MemoryRegionSection *section,
3606 bool match_data, uint64_t data, int fd)
3607 {
3608 }
3609
3610 static void core_eventfd_del(MemoryListener *listener,
3611 MemoryRegionSection *section,
3612 bool match_data, uint64_t data, int fd)
3613 {
3614 }
3615
3616 static void io_begin(MemoryListener *listener)
3617 {
3618 }
3619
3620 static void io_commit(MemoryListener *listener)
3621 {
3622 }
3623
3624 static void io_region_add(MemoryListener *listener,
3625 MemoryRegionSection *section)
3626 {
3627 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3628 section->offset_within_address_space, section->size);
3629 ioport_register(&section->mr->iorange);
3630 }
3631
3632 static void io_region_del(MemoryListener *listener,
3633 MemoryRegionSection *section)
3634 {
3635 isa_unassign_ioport(section->offset_within_address_space, section->size);
3636 }
3637
3638 static void io_region_nop(MemoryListener *listener,
3639 MemoryRegionSection *section)
3640 {
3641 }
3642
3643 static void io_log_start(MemoryListener *listener,
3644 MemoryRegionSection *section)
3645 {
3646 }
3647
3648 static void io_log_stop(MemoryListener *listener,
3649 MemoryRegionSection *section)
3650 {
3651 }
3652
3653 static void io_log_sync(MemoryListener *listener,
3654 MemoryRegionSection *section)
3655 {
3656 }
3657
3658 static void io_log_global_start(MemoryListener *listener)
3659 {
3660 }
3661
3662 static void io_log_global_stop(MemoryListener *listener)
3663 {
3664 }
3665
3666 static void io_eventfd_add(MemoryListener *listener,
3667 MemoryRegionSection *section,
3668 bool match_data, uint64_t data, int fd)
3669 {
3670 }
3671
3672 static void io_eventfd_del(MemoryListener *listener,
3673 MemoryRegionSection *section,
3674 bool match_data, uint64_t data, int fd)
3675 {
3676 }
3677
3678 static MemoryListener core_memory_listener = {
3679 .begin = core_begin,
3680 .commit = core_commit,
3681 .region_add = core_region_add,
3682 .region_del = core_region_del,
3683 .region_nop = core_region_nop,
3684 .log_start = core_log_start,
3685 .log_stop = core_log_stop,
3686 .log_sync = core_log_sync,
3687 .log_global_start = core_log_global_start,
3688 .log_global_stop = core_log_global_stop,
3689 .eventfd_add = core_eventfd_add,
3690 .eventfd_del = core_eventfd_del,
3691 .priority = 0,
3692 };
3693
3694 static MemoryListener io_memory_listener = {
3695 .begin = io_begin,
3696 .commit = io_commit,
3697 .region_add = io_region_add,
3698 .region_del = io_region_del,
3699 .region_nop = io_region_nop,
3700 .log_start = io_log_start,
3701 .log_stop = io_log_stop,
3702 .log_sync = io_log_sync,
3703 .log_global_start = io_log_global_start,
3704 .log_global_stop = io_log_global_stop,
3705 .eventfd_add = io_eventfd_add,
3706 .eventfd_del = io_eventfd_del,
3707 .priority = 0,
3708 };
3709
3710 static void memory_map_init(void)
3711 {
3712 system_memory = g_malloc(sizeof(*system_memory));
3713 memory_region_init(system_memory, "system", INT64_MAX);
3714 set_system_memory_map(system_memory);
3715
3716 system_io = g_malloc(sizeof(*system_io));
3717 memory_region_init(system_io, "io", 65536);
3718 set_system_io_map(system_io);
3719
3720 memory_listener_register(&core_memory_listener, system_memory);
3721 memory_listener_register(&io_memory_listener, system_io);
3722 }
3723
3724 MemoryRegion *get_system_memory(void)
3725 {
3726 return system_memory;
3727 }
3728
3729 MemoryRegion *get_system_io(void)
3730 {
3731 return system_io;
3732 }
3733
3734 #endif /* !defined(CONFIG_USER_ONLY) */
3735
3736 /* physical memory access (slow version, mainly for debug) */
3737 #if defined(CONFIG_USER_ONLY)
3738 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3739 uint8_t *buf, int len, int is_write)
3740 {
3741 int l, flags;
3742 target_ulong page;
3743 void * p;
3744
3745 while (len > 0) {
3746 page = addr & TARGET_PAGE_MASK;
3747 l = (page + TARGET_PAGE_SIZE) - addr;
3748 if (l > len)
3749 l = len;
3750 flags = page_get_flags(page);
3751 if (!(flags & PAGE_VALID))
3752 return -1;
3753 if (is_write) {
3754 if (!(flags & PAGE_WRITE))
3755 return -1;
3756 /* XXX: this code should not depend on lock_user */
3757 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3758 return -1;
3759 memcpy(p, buf, l);
3760 unlock_user(p, addr, l);
3761 } else {
3762 if (!(flags & PAGE_READ))
3763 return -1;
3764 /* XXX: this code should not depend on lock_user */
3765 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3766 return -1;
3767 memcpy(buf, p, l);
3768 unlock_user(p, addr, 0);
3769 }
3770 len -= l;
3771 buf += l;
3772 addr += l;
3773 }
3774 return 0;
3775 }
3776
3777 #else
3778 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3779 int len, int is_write)
3780 {
3781 int l, io_index;
3782 uint8_t *ptr;
3783 uint32_t val;
3784 target_phys_addr_t page;
3785 MemoryRegionSection section;
3786
3787 while (len > 0) {
3788 page = addr & TARGET_PAGE_MASK;
3789 l = (page + TARGET_PAGE_SIZE) - addr;
3790 if (l > len)
3791 l = len;
3792 section = phys_page_find(page >> TARGET_PAGE_BITS);
3793
3794 if (is_write) {
3795 if (!memory_region_is_ram(section.mr)) {
3796 target_phys_addr_t addr1;
3797 io_index = memory_region_get_ram_addr(section.mr)
3798 & (IO_MEM_NB_ENTRIES - 1);
3799 addr1 = (addr & ~TARGET_PAGE_MASK)
3800 + section.offset_within_region;
3801 /* XXX: could force cpu_single_env to NULL to avoid
3802 potential bugs */
3803 if (l >= 4 && ((addr1 & 3) == 0)) {
3804 /* 32 bit write access */
3805 val = ldl_p(buf);
3806 io_mem_write(io_index, addr1, val, 4);
3807 l = 4;
3808 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3809 /* 16 bit write access */
3810 val = lduw_p(buf);
3811 io_mem_write(io_index, addr1, val, 2);
3812 l = 2;
3813 } else {
3814 /* 8 bit write access */
3815 val = ldub_p(buf);
3816 io_mem_write(io_index, addr1, val, 1);
3817 l = 1;
3818 }
3819 } else if (!section.readonly) {
3820 ram_addr_t addr1;
3821 addr1 = (memory_region_get_ram_addr(section.mr)
3822 + section.offset_within_region)
3823 | (addr & ~TARGET_PAGE_MASK);
3824 /* RAM case */
3825 ptr = qemu_get_ram_ptr(addr1);
3826 memcpy(ptr, buf, l);
3827 if (!cpu_physical_memory_is_dirty(addr1)) {
3828 /* invalidate code */
3829 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3830 /* set dirty bit */
3831 cpu_physical_memory_set_dirty_flags(
3832 addr1, (0xff & ~CODE_DIRTY_FLAG));
3833 }
3834 qemu_put_ram_ptr(ptr);
3835 }
3836 } else {
3837 if (!is_ram_rom_romd(&section)) {
3838 target_phys_addr_t addr1;
3839 /* I/O case */
3840 io_index = memory_region_get_ram_addr(section.mr)
3841 & (IO_MEM_NB_ENTRIES - 1);
3842 addr1 = (addr & ~TARGET_PAGE_MASK)
3843 + section.offset_within_region;
3844 if (l >= 4 && ((addr1 & 3) == 0)) {
3845 /* 32 bit read access */
3846 val = io_mem_read(io_index, addr1, 4);
3847 stl_p(buf, val);
3848 l = 4;
3849 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3850 /* 16 bit read access */
3851 val = io_mem_read(io_index, addr1, 2);
3852 stw_p(buf, val);
3853 l = 2;
3854 } else {
3855 /* 8 bit read access */
3856 val = io_mem_read(io_index, addr1, 1);
3857 stb_p(buf, val);
3858 l = 1;
3859 }
3860 } else {
3861 /* RAM case */
3862 ptr = qemu_get_ram_ptr(section.mr->ram_addr
3863 + section.offset_within_region);
3864 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3865 qemu_put_ram_ptr(ptr);
3866 }
3867 }
3868 len -= l;
3869 buf += l;
3870 addr += l;
3871 }
3872 }
3873
3874 /* used for ROM loading : can write in RAM and ROM */
3875 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3876 const uint8_t *buf, int len)
3877 {
3878 int l;
3879 uint8_t *ptr;
3880 target_phys_addr_t page;
3881 MemoryRegionSection section;
3882
3883 while (len > 0) {
3884 page = addr & TARGET_PAGE_MASK;
3885 l = (page + TARGET_PAGE_SIZE) - addr;
3886 if (l > len)
3887 l = len;
3888 section = phys_page_find(page >> TARGET_PAGE_BITS);
3889
3890 if (!is_ram_rom_romd(&section)) {
3891 /* do nothing */
3892 } else {
3893 unsigned long addr1;
3894 addr1 = (memory_region_get_ram_addr(section.mr)
3895 + section.offset_within_region)
3896 + (addr & ~TARGET_PAGE_MASK);
3897 /* ROM/RAM case */
3898 ptr = qemu_get_ram_ptr(addr1);
3899 memcpy(ptr, buf, l);
3900 qemu_put_ram_ptr(ptr);
3901 }
3902 len -= l;
3903 buf += l;
3904 addr += l;
3905 }
3906 }
3907
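/* Bounce buffer used by cpu_physical_memory_map() when the requested
   range is not directly addressable RAM: data is staged in a temporary
   buffer and copied through cpu_physical_memory_rw() on unmap. There is
   only one bounce buffer, so only one such mapping can be live at a
   time. */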
3908 typedef struct {
3909 void *buffer;
3910 target_phys_addr_t addr;
3911 target_phys_addr_t len;
3912 } BounceBuffer;
3913
3914 static BounceBuffer bounce;
3915
3916 typedef struct MapClient {
3917 void *opaque;
3918 void (*callback)(void *opaque);
3919 QLIST_ENTRY(MapClient) link;
3920 } MapClient;
3921
3922 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3923 = QLIST_HEAD_INITIALIZER(map_client_list);
3924
3925 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3926 {
3927 MapClient *client = g_malloc(sizeof(*client));
3928
3929 client->opaque = opaque;
3930 client->callback = callback;
3931 QLIST_INSERT_HEAD(&map_client_list, client, link);
3932 return client;
3933 }
3934
3935 void cpu_unregister_map_client(void *_client)
3936 {
3937 MapClient *client = (MapClient *)_client;
3938
3939 QLIST_REMOVE(client, link);
3940 g_free(client);
3941 }
3942
3943 static void cpu_notify_map_clients(void)
3944 {
3945 MapClient *client;
3946
3947 while (!QLIST_EMPTY(&map_client_list)) {
3948 client = QLIST_FIRST(&map_client_list);
3949 client->callback(client->opaque);
3950 cpu_unregister_map_client(client);
3951 }
3952 }
3953
3954 /* Map a physical memory region into a host virtual address.
3955 * May map a subset of the requested range, given by and returned in *plen.
3956 * May return NULL if resources needed to perform the mapping are exhausted.
3957 * Use only for reads OR writes - not for read-modify-write operations.
3958 * Use cpu_register_map_client() to know when retrying the map operation is
3959 * likely to succeed.
3960 */
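/* Illustrative usage sketch (not taken from this file); dma_addr and
 * dma_len stand for values a device model would supply:
 *
 *     target_phys_addr_t plen = dma_len;
 *     void *p = cpu_physical_memory_map(dma_addr, &plen, 1);
 *     if (p) {
 *         memset(p, 0, plen);   /- device-side access to guest memory -/
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     } else {
 *         /- mapping resources exhausted; use cpu_register_map_client()
 *            to be notified when a retry is likely to succeed -/
 *     }
 *
 * (The /- ... -/ markers stand in for nested comments in this sketch.) */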
3961 void *cpu_physical_memory_map(target_phys_addr_t addr,
3962 target_phys_addr_t *plen,
3963 int is_write)
3964 {
3965 target_phys_addr_t len = *plen;
3966 target_phys_addr_t todo = 0;
3967 int l;
3968 target_phys_addr_t page;
3969 MemoryRegionSection section;
3970 ram_addr_t raddr = RAM_ADDR_MAX;
3971 ram_addr_t rlen;
3972 void *ret;
3973
3974 while (len > 0) {
3975 page = addr & TARGET_PAGE_MASK;
3976 l = (page + TARGET_PAGE_SIZE) - addr;
3977 if (l > len)
3978 l = len;
3979 section = phys_page_find(page >> TARGET_PAGE_BITS);
3980
3981 if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
3982 if (todo || bounce.buffer) {
3983 break;
3984 }
3985 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3986 bounce.addr = addr;
3987 bounce.len = l;
3988 if (!is_write) {
3989 cpu_physical_memory_read(addr, bounce.buffer, l);
3990 }
3991
3992 *plen = l;
3993 return bounce.buffer;
3994 }
3995 if (!todo) {
3996 raddr = memory_region_get_ram_addr(section.mr)
3997 + section.offset_within_region
3998 + (addr & ~TARGET_PAGE_MASK);
3999 }
4000
4001 len -= l;
4002 addr += l;
4003 todo += l;
4004 }
4005 rlen = todo;
4006 ret = qemu_ram_ptr_length(raddr, &rlen);
4007 *plen = rlen;
4008 return ret;
4009 }
4010
4011 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4012 * Will also mark the memory as dirty if is_write == 1. access_len gives
4013 * the amount of memory that was actually read or written by the caller.
4014 */
4015 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4016 int is_write, target_phys_addr_t access_len)
4017 {
4018 if (buffer != bounce.buffer) {
4019 if (is_write) {
4020 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
4021 while (access_len) {
4022 unsigned l;
4023 l = TARGET_PAGE_SIZE;
4024 if (l > access_len)
4025 l = access_len;
4026 if (!cpu_physical_memory_is_dirty(addr1)) {
4027 /* invalidate code */
4028 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4029 /* set dirty bit */
4030 cpu_physical_memory_set_dirty_flags(
4031 addr1, (0xff & ~CODE_DIRTY_FLAG));
4032 }
4033 addr1 += l;
4034 access_len -= l;
4035 }
4036 }
4037 if (xen_enabled()) {
4038 xen_invalidate_map_cache_entry(buffer);
4039 }
4040 return;
4041 }
4042 if (is_write) {
4043 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4044 }
4045 qemu_vfree(bounce.buffer);
4046 bounce.buffer = NULL;
4047 cpu_notify_map_clients();
4048 }
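
/* Illustrative sketch (not part of the original file): the map/copy/unmap
 * pattern used for zero-copy DMA into guest memory.  Error handling is cut
 * down to the bare minimum; a real caller also copes with short mappings by
 * looping over the remainder and may register a map client when NULL is
 * returned.  All names below are hypothetical.
 */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *src,
                                 target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        return; /* retry later via cpu_register_map_client() */
    }
    memcpy(host, src, plen);          /* only plen bytes were mapped */
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif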
4049
4050 /* warning: addr must be aligned */
4051 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4052 enum device_endian endian)
4053 {
4054 int io_index;
4055 uint8_t *ptr;
4056 uint32_t val;
4057 MemoryRegionSection section;
4058
4059 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4060
4061 if (!is_ram_rom_romd(&section)) {
4062 /* I/O case */
4063 io_index = memory_region_get_ram_addr(section.mr)
4064 & (IO_MEM_NB_ENTRIES - 1);
4065 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4066 val = io_mem_read(io_index, addr, 4);
4067 #if defined(TARGET_WORDS_BIGENDIAN)
4068 if (endian == DEVICE_LITTLE_ENDIAN) {
4069 val = bswap32(val);
4070 }
4071 #else
4072 if (endian == DEVICE_BIG_ENDIAN) {
4073 val = bswap32(val);
4074 }
4075 #endif
4076 } else {
4077 /* RAM case */
4078 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4079 & TARGET_PAGE_MASK)
4080 + section.offset_within_region) +
4081 (addr & ~TARGET_PAGE_MASK);
4082 switch (endian) {
4083 case DEVICE_LITTLE_ENDIAN:
4084 val = ldl_le_p(ptr);
4085 break;
4086 case DEVICE_BIG_ENDIAN:
4087 val = ldl_be_p(ptr);
4088 break;
4089 default:
4090 val = ldl_p(ptr);
4091 break;
4092 }
4093 }
4094 return val;
4095 }
4096
4097 uint32_t ldl_phys(target_phys_addr_t addr)
4098 {
4099 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4100 }
4101
4102 uint32_t ldl_le_phys(target_phys_addr_t addr)
4103 {
4104 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4105 }
4106
4107 uint32_t ldl_be_phys(target_phys_addr_t addr)
4108 {
4109 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4110 }
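
/* Illustrative sketch (not part of the original file): device emulation
 * code uses the explicitly sized and explicitly endian accessors to read
 * guest-physical data in the layout the *device* defines, independent of
 * the target CPU's endianness.  The descriptor address and field offset
 * are hypothetical.
 */
#if 0
static uint32_t example_read_desc_len(target_phys_addr_t desc_addr)
{
    /* a little-endian 32-bit "length" field at offset 8 of a descriptor */
    return ldl_le_phys(desc_addr + 8);
}
#endif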
4111
4112 /* warning: addr must be aligned */
4113 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4114 enum device_endian endian)
4115 {
4116 int io_index;
4117 uint8_t *ptr;
4118 uint64_t val;
4119 MemoryRegionSection section;
4120
4121 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4122
4123 if (!is_ram_rom_romd(&section)) {
4124 /* I/O case */
4125 io_index = memory_region_get_ram_addr(section.mr)
4126 & (IO_MEM_NB_ENTRIES - 1);
4127 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4128
4129 /* XXX This is broken when device endian != cpu endian.
4130 Fix and add "endian" variable check */
4131 #ifdef TARGET_WORDS_BIGENDIAN
4132 val = io_mem_read(io_index, addr, 4) << 32;
4133 val |= io_mem_read(io_index, addr + 4, 4);
4134 #else
4135 val = io_mem_read(io_index, addr, 4);
4136 val |= io_mem_read(io_index, addr + 4, 4) << 32;
4137 #endif
4138 } else {
4139 /* RAM case */
4140 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4141 & TARGET_PAGE_MASK)
4142 + section.offset_within_region)
4143 + (addr & ~TARGET_PAGE_MASK);
4144 switch (endian) {
4145 case DEVICE_LITTLE_ENDIAN:
4146 val = ldq_le_p(ptr);
4147 break;
4148 case DEVICE_BIG_ENDIAN:
4149 val = ldq_be_p(ptr);
4150 break;
4151 default:
4152 val = ldq_p(ptr);
4153 break;
4154 }
4155 }
4156 return val;
4157 }
4158
4159 uint64_t ldq_phys(target_phys_addr_t addr)
4160 {
4161 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4162 }
4163
4164 uint64_t ldq_le_phys(target_phys_addr_t addr)
4165 {
4166 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4167 }
4168
4169 uint64_t ldq_be_phys(target_phys_addr_t addr)
4170 {
4171 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4172 }
4173
4174 /* XXX: optimize */
4175 uint32_t ldub_phys(target_phys_addr_t addr)
4176 {
4177 uint8_t val;
4178 cpu_physical_memory_read(addr, &val, 1);
4179 return val;
4180 }
4181
4182 /* warning: addr must be aligned */
4183 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4184 enum device_endian endian)
4185 {
4186 int io_index;
4187 uint8_t *ptr;
4188 uint32_t val;
4189 MemoryRegionSection section;
4190
4191 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4192
4193 if (!is_ram_rom_romd(&section)) {
4194 /* I/O case */
4195 io_index = memory_region_get_ram_addr(section.mr)
4196 & (IO_MEM_NB_ENTRIES - 1);
4197 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4198 val = io_mem_read(io_index, addr, 2);
4199 #if defined(TARGET_WORDS_BIGENDIAN)
4200 if (endian == DEVICE_LITTLE_ENDIAN) {
4201 val = bswap16(val);
4202 }
4203 #else
4204 if (endian == DEVICE_BIG_ENDIAN) {
4205 val = bswap16(val);
4206 }
4207 #endif
4208 } else {
4209 /* RAM case */
4210 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4211 & TARGET_PAGE_MASK)
4212 + section.offset_within_region)
4213 + (addr & ~TARGET_PAGE_MASK);
4214 switch (endian) {
4215 case DEVICE_LITTLE_ENDIAN:
4216 val = lduw_le_p(ptr);
4217 break;
4218 case DEVICE_BIG_ENDIAN:
4219 val = lduw_be_p(ptr);
4220 break;
4221 default:
4222 val = lduw_p(ptr);
4223 break;
4224 }
4225 }
4226 return val;
4227 }
4228
4229 uint32_t lduw_phys(target_phys_addr_t addr)
4230 {
4231 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4232 }
4233
4234 uint32_t lduw_le_phys(target_phys_addr_t addr)
4235 {
4236 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4237 }
4238
4239 uint32_t lduw_be_phys(target_phys_addr_t addr)
4240 {
4241 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4242 }
4243
4244 /* warning: addr must be aligned. The RAM page is not marked as dirty
4245 and the code inside is not invalidated. This is useful when the dirty
4246 bits are used to track modified PTEs */
4247 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4248 {
4249 int io_index;
4250 uint8_t *ptr;
4251 MemoryRegionSection section;
4252
4253 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4254
4255 if (!memory_region_is_ram(section.mr) || section.readonly) {
4256 if (memory_region_is_ram(section.mr)) {
4257 io_index = io_mem_rom.ram_addr;
4258 } else {
4259 io_index = memory_region_get_ram_addr(section.mr) & (IO_MEM_NB_ENTRIES - 1);
4260 }
4261 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4262 io_mem_write(io_index, addr, val, 4);
4263 } else {
4264 unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
4265 & TARGET_PAGE_MASK)
4266 + section.offset_within_region
4267 + (addr & ~TARGET_PAGE_MASK);
4268 ptr = qemu_get_ram_ptr(addr1);
4269 stl_p(ptr, val);
4270
4271 if (unlikely(in_migration)) {
4272 if (!cpu_physical_memory_is_dirty(addr1)) {
4273 /* invalidate code */
4274 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4275 /* set dirty bit */
4276 cpu_physical_memory_set_dirty_flags(
4277 addr1, (0xff & ~CODE_DIRTY_FLAG));
4278 }
4279 }
4280 }
4281 }
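
/* Illustrative sketch (not part of the original file): target MMU helpers
 * use stl_phys_notdirty() to update accessed/dirty bits inside a guest
 * page table entry without marking the page dirty for migration or
 * invalidating translated code.  The PTE address and bit mask below are
 * hypothetical.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                      /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif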
4282
4283 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4284 {
4285 int io_index;
4286 uint8_t *ptr;
4287 MemoryRegionSection section;
4288
4289 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4290
4291 if (!memory_region_is_ram(section.mr) || section.readonly) {
4292 if (memory_region_is_ram(section.mr)) {
4293 io_index = io_mem_rom.ram_addr;
4294 } else {
4295 io_index = memory_region_get_ram_addr(section.mr)
4296 & (IO_MEM_NB_ENTRIES - 1);
4297 }
4298 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4299 #ifdef TARGET_WORDS_BIGENDIAN
4300 io_mem_write(io_index, addr, val >> 32, 4);
4301 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4302 #else
4303 io_mem_write(io_index, addr, (uint32_t)val, 4);
4304 io_mem_write(io_index, addr + 4, val >> 32, 4);
4305 #endif
4306 } else {
4307 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4308 & TARGET_PAGE_MASK)
4309 + section.offset_within_region)
4310 + (addr & ~TARGET_PAGE_MASK);
4311 stq_p(ptr, val);
4312 }
4313 }
4314
4315 /* warning: addr must be aligned */
4316 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4317 enum device_endian endian)
4318 {
4319 int io_index;
4320 uint8_t *ptr;
4321 MemoryRegionSection section;
4322
4323 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4324
4325 if (!memory_region_is_ram(section.mr) || section.readonly) {
4326 if (memory_region_is_ram(section.mr)) {
4327 io_index = io_mem_rom.ram_addr;
4328 } else {
4329 io_index = memory_region_get_ram_addr(section.mr)
4330 & (IO_MEM_NB_ENTRIES - 1);
4331 }
4332 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4333 #if defined(TARGET_WORDS_BIGENDIAN)
4334 if (endian == DEVICE_LITTLE_ENDIAN) {
4335 val = bswap32(val);
4336 }
4337 #else
4338 if (endian == DEVICE_BIG_ENDIAN) {
4339 val = bswap32(val);
4340 }
4341 #endif
4342 io_mem_write(io_index, addr, val, 4);
4343 } else {
4344 unsigned long addr1;
4345 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4346 + section.offset_within_region
4347 + (addr & ~TARGET_PAGE_MASK);
4348 /* RAM case */
4349 ptr = qemu_get_ram_ptr(addr1);
4350 switch (endian) {
4351 case DEVICE_LITTLE_ENDIAN:
4352 stl_le_p(ptr, val);
4353 break;
4354 case DEVICE_BIG_ENDIAN:
4355 stl_be_p(ptr, val);
4356 break;
4357 default:
4358 stl_p(ptr, val);
4359 break;
4360 }
4361 if (!cpu_physical_memory_is_dirty(addr1)) {
4362 /* invalidate code */
4363 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4364 /* set dirty bit */
4365 cpu_physical_memory_set_dirty_flags(addr1,
4366 (0xff & ~CODE_DIRTY_FLAG));
4367 }
4368 }
4369 }
4370
4371 void stl_phys(target_phys_addr_t addr, uint32_t val)
4372 {
4373 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4374 }
4375
4376 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4377 {
4378 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4379 }
4380
4381 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4382 {
4383 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4384 }
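
/* Illustrative sketch (not part of the original file): the store
 * counterpart of the ldl_le_phys() sketch further up, writing a 32-bit
 * field that the device specification defines as little-endian.  The
 * address, offset and value are hypothetical.
 */
#if 0
static void example_write_desc_status(target_phys_addr_t desc_addr,
                                      uint32_t status)
{
    stl_le_phys(desc_addr + 12, status); /* hypothetical offset 12 */
}
#endif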
4385
4386 /* XXX: optimize */
4387 void stb_phys(target_phys_addr_t addr, uint32_t val)
4388 {
4389 uint8_t v = val;
4390 cpu_physical_memory_write(addr, &v, 1);
4391 }
4392
4393 /* warning: addr must be aligned */
4394 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4395 enum device_endian endian)
4396 {
4397 int io_index;
4398 uint8_t *ptr;
4399 MemoryRegionSection section;
4400
4401 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4402
4403 if (!memory_region_is_ram(section.mr) || section.readonly) {
4404 if (memory_region_is_ram(section.mr)) {
4405 io_index = io_mem_rom.ram_addr;
4406 } else {
4407 io_index = memory_region_get_ram_addr(section.mr)
4408 & (IO_MEM_NB_ENTRIES - 1);
4409 }
4410 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
4411 #if defined(TARGET_WORDS_BIGENDIAN)
4412 if (endian == DEVICE_LITTLE_ENDIAN) {
4413 val = bswap16(val);
4414 }
4415 #else
4416 if (endian == DEVICE_BIG_ENDIAN) {
4417 val = bswap16(val);
4418 }
4419 #endif
4420 io_mem_write(io_index, addr, val, 2);
4421 } else {
4422 unsigned long addr1;
4423 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4424 + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
4425 /* RAM case */
4426 ptr = qemu_get_ram_ptr(addr1);
4427 switch (endian) {
4428 case DEVICE_LITTLE_ENDIAN:
4429 stw_le_p(ptr, val);
4430 break;
4431 case DEVICE_BIG_ENDIAN:
4432 stw_be_p(ptr, val);
4433 break;
4434 default:
4435 stw_p(ptr, val);
4436 break;
4437 }
4438 if (!cpu_physical_memory_is_dirty(addr1)) {
4439 /* invalidate code */
4440 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4441 /* set dirty bit */
4442 cpu_physical_memory_set_dirty_flags(addr1,
4443 (0xff & ~CODE_DIRTY_FLAG));
4444 }
4445 }
4446 }
4447
4448 void stw_phys(target_phys_addr_t addr, uint32_t val)
4449 {
4450 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4451 }
4452
4453 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4454 {
4455 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4456 }
4457
4458 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4459 {
4460 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4461 }
4462
4463 /* XXX: optimize */
4464 void stq_phys(target_phys_addr_t addr, uint64_t val)
4465 {
4466 val = tswap64(val);
4467 cpu_physical_memory_write(addr, &val, 8);
4468 }
4469
4470 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4471 {
4472 val = cpu_to_le64(val);
4473 cpu_physical_memory_write(addr, &val, 8);
4474 }
4475
4476 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4477 {
4478 val = cpu_to_be64(val);
4479 cpu_physical_memory_write(addr, &val, 8);
4480 }
4481
4482 /* virtual memory access for debug (includes writing to ROM) */
4483 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4484 uint8_t *buf, int len, int is_write)
4485 {
4486 int l;
4487 target_phys_addr_t phys_addr;
4488 target_ulong page;
4489
4490 while (len > 0) {
4491 page = addr & TARGET_PAGE_MASK;
4492 phys_addr = cpu_get_phys_page_debug(env, page);
4493 /* if no physical page mapped, return an error */
4494 if (phys_addr == -1)
4495 return -1;
4496 l = (page + TARGET_PAGE_SIZE) - addr;
4497 if (l > len)
4498 l = len;
4499 phys_addr += (addr & ~TARGET_PAGE_MASK);
4500 if (is_write)
4501 cpu_physical_memory_write_rom(phys_addr, buf, l);
4502 else
4503 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4504 len -= l;
4505 buf += l;
4506 addr += l;
4507 }
4508 return 0;
4509 }
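
/* Illustrative sketch (not part of the original file): how a debugger-style
 * caller (the gdb stub or the monitor) reads guest *virtual* memory.  The
 * wrapper name, buffer and size are hypothetical.
 */
#if 0
static int example_debug_peek(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int size)
{
    /* returns -1 if no physical page is mapped at vaddr */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0);
}
#endif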
4510 #endif
4511
4512 /* in deterministic execution mode, instructions doing device I/Os
4513 must be at the end of the TB */
4514 void cpu_io_recompile(CPUState *env, void *retaddr)
4515 {
4516 TranslationBlock *tb;
4517 uint32_t n, cflags;
4518 target_ulong pc, cs_base;
4519 uint64_t flags;
4520
4521 tb = tb_find_pc((unsigned long)retaddr);
4522 if (!tb) {
4523 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4524 retaddr);
4525 }
4526 n = env->icount_decr.u16.low + tb->icount;
4527 cpu_restore_state(tb, env, (unsigned long)retaddr);
4528 /* Calculate how many instructions had been executed before the fault
4529 occurred. */
4530 n = n - env->icount_decr.u16.low;
4531 /* Generate a new TB ending on the I/O insn. */
4532 n++;
4533 /* On MIPS and SH, delay slot instructions can only be restarted if
4534 they were already the first instruction in the TB. If this is not
4535 the first instruction in a TB then re-execute the preceding
4536 branch. */
4537 #if defined(TARGET_MIPS)
4538 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4539 env->active_tc.PC -= 4;
4540 env->icount_decr.u16.low++;
4541 env->hflags &= ~MIPS_HFLAG_BMASK;
4542 }
4543 #elif defined(TARGET_SH4)
4544 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4545 && n > 1) {
4546 env->pc -= 2;
4547 env->icount_decr.u16.low++;
4548 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4549 }
4550 #endif
4551 /* This should never happen. */
4552 if (n > CF_COUNT_MASK)
4553 cpu_abort(env, "TB too big during recompile");
4554
4555 cflags = n | CF_LAST_IO;
4556 pc = tb->pc;
4557 cs_base = tb->cs_base;
4558 flags = tb->flags;
4559 tb_phys_invalidate(tb, -1);
4560 /* FIXME: In theory this could raise an exception. In practice
4561 we have already translated the block once so it's probably ok. */
4562 tb_gen_code(env, pc, cs_base, flags, cflags);
4563 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4564 the first in the TB) then we end up generating a whole new TB and
4565 repeating the fault, which is horribly inefficient.
4566 Better would be to execute just this insn uncached, or generate a
4567 second new TB. */
4568 cpu_resume_from_signal(env, NULL);
4569 }
4570
4571 #if !defined(CONFIG_USER_ONLY)
4572
4573 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4574 {
4575 int i, target_code_size, max_target_code_size;
4576 int direct_jmp_count, direct_jmp2_count, cross_page;
4577 TranslationBlock *tb;
4578
4579 target_code_size = 0;
4580 max_target_code_size = 0;
4581 cross_page = 0;
4582 direct_jmp_count = 0;
4583 direct_jmp2_count = 0;
4584 for (i = 0; i < nb_tbs; i++) {
4585 tb = &tbs[i];
4586 target_code_size += tb->size;
4587 if (tb->size > max_target_code_size)
4588 max_target_code_size = tb->size;
4589 if (tb->page_addr[1] != -1)
4590 cross_page++;
4591 if (tb->tb_next_offset[0] != 0xffff) {
4592 direct_jmp_count++;
4593 if (tb->tb_next_offset[1] != 0xffff) {
4594 direct_jmp2_count++;
4595 }
4596 }
4597 }
4598 /* XXX: avoid using doubles ? */
4599 cpu_fprintf(f, "Translation buffer state:\n");
4600 cpu_fprintf(f, "gen code size %td/%ld\n",
4601 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4602 cpu_fprintf(f, "TB count %d/%d\n",
4603 nb_tbs, code_gen_max_blocks);
4604 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4605 nb_tbs ? target_code_size / nb_tbs : 0,
4606 max_target_code_size);
4607 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4608 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4609 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4610 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4611 cross_page,
4612 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4613 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4614 direct_jmp_count,
4615 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4616 direct_jmp2_count,
4617 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4618 cpu_fprintf(f, "\nStatistics:\n");
4619 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4620 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4621 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4622 tcg_dump_info(f, cpu_fprintf);
4623 }
4624
4625 /* NOTE: this function can trigger an exception */
4626 /* NOTE2: the returned address is not exactly the physical address: it
4627 is the offset relative to phys_ram_base */
4628 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4629 {
4630 int mmu_idx, page_index, pd;
4631 void *p;
4632
4633 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4634 mmu_idx = cpu_mmu_index(env1);
4635 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4636 (addr & TARGET_PAGE_MASK))) {
4637 ldub_code(addr);
4638 }
4639 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4640 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4641 && !io_mem_region[pd]->rom_device) {
4642 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4643 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4644 #else
4645 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4646 #endif
4647 }
4648 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4649 return qemu_ram_addr_from_host_nofail(p);
4650 }
4651
4652 /*
4653 * A helper function for the _utterly broken_ virtio device model to find out if
4654 * it's running on a big endian machine. Don't do this at home kids!
4655 */
4656 bool virtio_is_big_endian(void);
4657 bool virtio_is_big_endian(void)
4658 {
4659 #if defined(TARGET_WORDS_BIGENDIAN)
4660 return true;
4661 #else
4662 return false;
4663 #endif
4664 }
4665
4666 #define MMUSUFFIX _cmmu
4667 #undef GETPC
4668 #define GETPC() NULL
4669 #define env cpu_single_env
4670 #define SOFTMMU_CODE_ACCESS
4671
4672 #define SHIFT 0
4673 #include "softmmu_template.h"
4674
4675 #define SHIFT 1
4676 #include "softmmu_template.h"
4677
4678 #define SHIFT 2
4679 #include "softmmu_template.h"
4680
4681 #define SHIFT 3
4682 #include "softmmu_template.h"
4683
4684 #undef env
4685
4686 #endif