1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "osdep.h"
33 #include "kvm.h"
34 #include "hw/xen.h"
35 #include "qemu-timer.h"
36 #include "memory.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <machine/profile.h>
48 #define _KERNEL
49 #include <sys/user.h>
50 #undef _KERNEL
51 #undef sigqueue
52 #include <libutil.h>
53 #endif
54 #endif
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
57 #include "trace.h"
58 #endif
59
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
62
63 //#define DEBUG_TB_INVALIDATE
64 //#define DEBUG_FLUSH
65 //#define DEBUG_TLB
66 //#define DEBUG_UNASSIGNED
67
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
71
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
74
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
77 #undef DEBUG_TB_CHECK
78 #endif
79
80 #define SMC_BITMAP_USE_THRESHOLD 10
81
82 static TranslationBlock *tbs;
83 static int code_gen_max_blocks;
84 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85 static int nb_tbs;
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91    have limited branch ranges (possibly also PPC) so place it in a
92    section close to the code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96 #elif defined(_WIN32)
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
100 #else
101 #define code_gen_section \
102 __attribute__((aligned (32)))
103 #endif
104
105 uint8_t code_gen_prologue[1024] code_gen_section;
106 static uint8_t *code_gen_buffer;
107 static unsigned long code_gen_buffer_size;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size;
110 static uint8_t *code_gen_ptr;
111
112 #if !defined(CONFIG_USER_ONLY)
113 int phys_ram_fd;
114 static int in_migration;
115
116 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118 static MemoryRegion *system_memory;
119 static MemoryRegion *system_io;
120
121 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
122 static MemoryRegion io_mem_subpage_ram;
123
124 #endif
125
126 CPUState *first_cpu;
127 /* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
129 DEFINE_TLS(CPUState *,cpu_single_env);
130 /* 0 = Do not count executed instructions.
131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133 int use_icount = 0;
134
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138     /* in order to optimize self-modifying code, we count the number
139        of code write accesses to a given page before using a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144 #endif
145 } PageDesc;
146
147 /* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149 #if !defined(CONFIG_USER_ONLY)
150 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 #else
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
154 #endif
155 #else
156 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
157 #endif
158
159 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_BITS 10
161 #define L2_SIZE (1 << L2_BITS)
162
163 /* The bits remaining after N lower levels of page tables. */
164 #define P_L1_BITS_REM \
165 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166 #define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
169 /* Size of the L1 page table. Avoid silly small sizes. */
170 #if P_L1_BITS_REM < 4
171 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
172 #else
173 #define P_L1_BITS P_L1_BITS_REM
174 #endif
175
176 #if V_L1_BITS_REM < 4
177 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
178 #else
179 #define V_L1_BITS V_L1_BITS_REM
180 #endif
181
182 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
183 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
184
185 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
186 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
187
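/* Worked example (for instance, user mode with TARGET_PAGE_BITS == 12 and
   L1_MAP_ADDR_SPACE_BITS == 32): 20 bits remain above the page offset,
   V_L1_BITS_REM = 20 % 10 = 0, so V_L1_BITS = 10, V_L1_SIZE = 1024 and
   V_L1_SHIFT = 10, i.e. a 1024-entry L1 table whose slots point directly
   at 1024-entry arrays of PageDesc. */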
188 unsigned long qemu_real_host_page_size;
189 unsigned long qemu_host_page_size;
190 unsigned long qemu_host_page_mask;
191
192 /* This is a multi-level map on the virtual address space.
193 The bottom level has pointers to PageDesc. */
194 static void *l1_map[V_L1_SIZE];
195
196 #if !defined(CONFIG_USER_ONLY)
197 typedef struct PhysPageDesc {
198 /* offset in host memory of the page + io_index in the low bits */
199 ram_addr_t phys_offset;
200 ram_addr_t region_offset;
201 } PhysPageDesc;
202
203 /* This is a multi-level map on the physical address space.
204 The bottom level has pointers to PhysPageDesc. */
205 static void *l1_phys_map[P_L1_SIZE];
206
207 static void io_mem_init(void);
208 static void memory_map_init(void);
209
210 /* io memory support */
211 MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
212 static char io_mem_used[IO_MEM_NB_ENTRIES];
213 static MemoryRegion io_mem_watch;
214 #endif
215
216 /* log support */
217 #ifdef WIN32
218 static const char *logfilename = "qemu.log";
219 #else
220 static const char *logfilename = "/tmp/qemu.log";
221 #endif
222 FILE *logfile;
223 int loglevel;
224 static int log_append = 0;
225
226 /* statistics */
227 #if !defined(CONFIG_USER_ONLY)
228 static int tlb_flush_count;
229 #endif
230 static int tb_flush_count;
231 static int tb_phys_invalidate_count;
232
233 #ifdef _WIN32
234 static void map_exec(void *addr, long size)
235 {
236 DWORD old_protect;
237 VirtualProtect(addr, size,
238 PAGE_EXECUTE_READWRITE, &old_protect);
239
240 }
241 #else
242 static void map_exec(void *addr, long size)
243 {
244 unsigned long start, end, page_size;
245
246 page_size = getpagesize();
247 start = (unsigned long)addr;
248 start &= ~(page_size - 1);
249
250 end = (unsigned long)addr + size;
251 end += page_size - 1;
252 end &= ~(page_size - 1);
253
254 mprotect((void *)start, end - start,
255 PROT_READ | PROT_WRITE | PROT_EXEC);
256 }
257 #endif
258
259 static void page_init(void)
260 {
261 /* NOTE: we can always suppose that qemu_host_page_size >=
262 TARGET_PAGE_SIZE */
263 #ifdef _WIN32
264 {
265 SYSTEM_INFO system_info;
266
267 GetSystemInfo(&system_info);
268 qemu_real_host_page_size = system_info.dwPageSize;
269 }
270 #else
271 qemu_real_host_page_size = getpagesize();
272 #endif
273 if (qemu_host_page_size == 0)
274 qemu_host_page_size = qemu_real_host_page_size;
275 if (qemu_host_page_size < TARGET_PAGE_SIZE)
276 qemu_host_page_size = TARGET_PAGE_SIZE;
277 qemu_host_page_mask = ~(qemu_host_page_size - 1);
278
279 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
280 {
281 #ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry *freep;
283 int i, cnt;
284
285 freep = kinfo_getvmmap(getpid(), &cnt);
286 if (freep) {
287 mmap_lock();
288 for (i = 0; i < cnt; i++) {
289 unsigned long startaddr, endaddr;
290
291 startaddr = freep[i].kve_start;
292 endaddr = freep[i].kve_end;
293 if (h2g_valid(startaddr)) {
294 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
295
296 if (h2g_valid(endaddr)) {
297 endaddr = h2g(endaddr);
298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
299 } else {
300 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 endaddr = ~0ul;
302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
303 #endif
304 }
305 }
306 }
307 free(freep);
308 mmap_unlock();
309 }
310 #else
311 FILE *f;
312
313 last_brk = (unsigned long)sbrk(0);
314
315 f = fopen("/compat/linux/proc/self/maps", "r");
316 if (f) {
317 mmap_lock();
318
319 do {
320 unsigned long startaddr, endaddr;
321 int n;
322
323 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
324
325 if (n == 2 && h2g_valid(startaddr)) {
326 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
327
328 if (h2g_valid(endaddr)) {
329 endaddr = h2g(endaddr);
330 } else {
331 endaddr = ~0ul;
332 }
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
334 }
335 } while (!feof(f));
336
337 fclose(f);
338 mmap_unlock();
339 }
340 #endif
341 }
342 #endif
343 }
344
345 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
346 {
347 PageDesc *pd;
348 void **lp;
349 int i;
350
351 #if defined(CONFIG_USER_ONLY)
352 /* We can't use g_malloc because it may recurse into a locked mutex. */
353 # define ALLOC(P, SIZE) \
354 do { \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
357 } while (0)
358 #else
359 # define ALLOC(P, SIZE) \
360 do { P = g_malloc0(SIZE); } while (0)
361 #endif
362
363 /* Level 1. Always allocated. */
364 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
365
366 /* Level 2..N-1. */
367 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 void **p = *lp;
369
370 if (p == NULL) {
371 if (!alloc) {
372 return NULL;
373 }
374 ALLOC(p, sizeof(void *) * L2_SIZE);
375 *lp = p;
376 }
377
378 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
379 }
380
381 pd = *lp;
382 if (pd == NULL) {
383 if (!alloc) {
384 return NULL;
385 }
386 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 *lp = pd;
388 }
389
390 #undef ALLOC
391
392 return pd + (index & (L2_SIZE - 1));
393 }
394
395 static inline PageDesc *page_find(tb_page_addr_t index)
396 {
397 return page_find_alloc(index, 0);
398 }
399
400 #if !defined(CONFIG_USER_ONLY)
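/* Same walk as page_find_alloc(), but over the physical address map
   (l1_phys_map); freshly allocated leaves are initialized to point at
   io_mem_unassigned. */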
401 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
402 {
403 PhysPageDesc *pd;
404 void **lp;
405 int i;
406
407 /* Level 1. Always allocated. */
408 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
409
410 /* Level 2..N-1. */
411 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
412 void **p = *lp;
413 if (p == NULL) {
414 if (!alloc) {
415 return NULL;
416 }
417 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
418 }
419 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
420 }
421
422 pd = *lp;
423 if (pd == NULL) {
424 int i;
425 int first_index = index & ~(L2_SIZE - 1);
426
427 if (!alloc) {
428 return NULL;
429 }
430
431 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
432
433 for (i = 0; i < L2_SIZE; i++) {
434 pd[i].phys_offset = io_mem_unassigned.ram_addr;
435 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
436 }
437 }
438
439 return pd + (index & (L2_SIZE - 1));
440 }
441
442 static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
443 {
444 PhysPageDesc *p = phys_page_find_alloc(index, 0);
445
446 if (p) {
447 return *p;
448 } else {
449 return (PhysPageDesc) {
450 .phys_offset = io_mem_unassigned.ram_addr,
451 .region_offset = index << TARGET_PAGE_BITS,
452 };
453 }
454 }
455
456 static void tlb_protect_code(ram_addr_t ram_addr);
457 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
458 target_ulong vaddr);
459 #define mmap_lock() do { } while(0)
460 #define mmap_unlock() do { } while(0)
461 #endif
462
463 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464
465 #if defined(CONFIG_USER_ONLY)
466 /* Currently it is not recommended to allocate big chunks of data in
467    user mode. This will change when a dedicated libc is used. */
468 #define USE_STATIC_CODE_GEN_BUFFER
469 #endif
470
471 #ifdef USE_STATIC_CODE_GEN_BUFFER
472 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
473 __attribute__((aligned (CODE_GEN_ALIGN)));
474 #endif
475
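/* Allocate the buffer that holds generated host code, plus the array of
   TranslationBlock descriptors.  On most hosts the buffer is mmap'ed with
   PROT_EXEC and, where the host architecture needs it, placed low enough
   in the address space that generated code can reach it with direct
   branches and calls. */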
476 static void code_gen_alloc(unsigned long tb_size)
477 {
478 #ifdef USE_STATIC_CODE_GEN_BUFFER
479 code_gen_buffer = static_code_gen_buffer;
480 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482 #else
483 code_gen_buffer_size = tb_size;
484 if (code_gen_buffer_size == 0) {
485 #if defined(CONFIG_USER_ONLY)
486 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
487 #else
488 /* XXX: needs adjustments */
489 code_gen_buffer_size = (unsigned long)(ram_size / 4);
490 #endif
491 }
492 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
493 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
494 /* The code gen buffer location may have constraints depending on
495 the host cpu and OS */
496 #if defined(__linux__)
497 {
498 int flags;
499 void *start = NULL;
500
501 flags = MAP_PRIVATE | MAP_ANONYMOUS;
502 #if defined(__x86_64__)
503 flags |= MAP_32BIT;
504 /* Cannot map more than that */
505 if (code_gen_buffer_size > (800 * 1024 * 1024))
506 code_gen_buffer_size = (800 * 1024 * 1024);
507 #elif defined(__sparc_v9__)
508 // Map the buffer below 2G, so we can use direct calls and branches
509 flags |= MAP_FIXED;
510 start = (void *) 0x60000000UL;
511 if (code_gen_buffer_size > (512 * 1024 * 1024))
512 code_gen_buffer_size = (512 * 1024 * 1024);
513 #elif defined(__arm__)
514     /* Keep the buffer no bigger than 16MB so we can branch between blocks */
515 if (code_gen_buffer_size > 16 * 1024 * 1024)
516 code_gen_buffer_size = 16 * 1024 * 1024;
517 #elif defined(__s390x__)
518 /* Map the buffer so that we can use direct calls and branches. */
519 /* We have a +- 4GB range on the branches; leave some slop. */
520 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
521 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
522 }
523 start = (void *)0x90000000UL;
524 #endif
525 code_gen_buffer = mmap(start, code_gen_buffer_size,
526 PROT_WRITE | PROT_READ | PROT_EXEC,
527 flags, -1, 0);
528 if (code_gen_buffer == MAP_FAILED) {
529 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
530 exit(1);
531 }
532 }
533 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
534 || defined(__DragonFly__) || defined(__OpenBSD__) \
535 || defined(__NetBSD__)
536 {
537 int flags;
538 void *addr = NULL;
539 flags = MAP_PRIVATE | MAP_ANONYMOUS;
540 #if defined(__x86_64__)
541 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
542 * 0x40000000 is free */
543 flags |= MAP_FIXED;
544 addr = (void *)0x40000000;
545 /* Cannot map more than that */
546 if (code_gen_buffer_size > (800 * 1024 * 1024))
547 code_gen_buffer_size = (800 * 1024 * 1024);
548 #elif defined(__sparc_v9__)
549 // Map the buffer below 2G, so we can use direct calls and branches
550 flags |= MAP_FIXED;
551 addr = (void *) 0x60000000UL;
552 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
553 code_gen_buffer_size = (512 * 1024 * 1024);
554 }
555 #endif
556 code_gen_buffer = mmap(addr, code_gen_buffer_size,
557 PROT_WRITE | PROT_READ | PROT_EXEC,
558 flags, -1, 0);
559 if (code_gen_buffer == MAP_FAILED) {
560 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
561 exit(1);
562 }
563 }
564 #else
565 code_gen_buffer = g_malloc(code_gen_buffer_size);
566 map_exec(code_gen_buffer, code_gen_buffer_size);
567 #endif
568 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
569 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
570 code_gen_buffer_max_size = code_gen_buffer_size -
571 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
572 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
573 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
574 }
575
576 /* Must be called before using the QEMU cpus. 'tb_size' is the size
577 (in bytes) allocated to the translation buffer. Zero means default
578 size. */
579 void tcg_exec_init(unsigned long tb_size)
580 {
581 cpu_gen_init();
582 code_gen_alloc(tb_size);
583 code_gen_ptr = code_gen_buffer;
584 page_init();
585 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
586 /* There's no guest base to take into account, so go ahead and
587 initialize the prologue now. */
588 tcg_prologue_init(&tcg_ctx);
589 #endif
590 }
591
592 bool tcg_enabled(void)
593 {
594 return code_gen_buffer != NULL;
595 }
596
597 void cpu_exec_init_all(void)
598 {
599 #if !defined(CONFIG_USER_ONLY)
600 memory_map_init();
601 io_mem_init();
602 #endif
603 }
604
605 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606
607 static int cpu_common_post_load(void *opaque, int version_id)
608 {
609 CPUState *env = opaque;
610
611 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
612 version_id is increased. */
613 env->interrupt_request &= ~0x01;
614 tlb_flush(env, 1);
615
616 return 0;
617 }
618
619 static const VMStateDescription vmstate_cpu_common = {
620 .name = "cpu_common",
621 .version_id = 1,
622 .minimum_version_id = 1,
623 .minimum_version_id_old = 1,
624 .post_load = cpu_common_post_load,
625 .fields = (VMStateField []) {
626 VMSTATE_UINT32(halted, CPUState),
627 VMSTATE_UINT32(interrupt_request, CPUState),
628 VMSTATE_END_OF_LIST()
629 }
630 };
631 #endif
632
633 CPUState *qemu_get_cpu(int cpu)
634 {
635 CPUState *env = first_cpu;
636
637 while (env) {
638 if (env->cpu_index == cpu)
639 break;
640 env = env->next_cpu;
641 }
642
643 return env;
644 }
645
646 void cpu_exec_init(CPUState *env)
647 {
648 CPUState **penv;
649 int cpu_index;
650
651 #if defined(CONFIG_USER_ONLY)
652 cpu_list_lock();
653 #endif
654 env->next_cpu = NULL;
655 penv = &first_cpu;
656 cpu_index = 0;
657 while (*penv != NULL) {
658 penv = &(*penv)->next_cpu;
659 cpu_index++;
660 }
661 env->cpu_index = cpu_index;
662 env->numa_node = 0;
663 QTAILQ_INIT(&env->breakpoints);
664 QTAILQ_INIT(&env->watchpoints);
665 #ifndef CONFIG_USER_ONLY
666 env->thread_id = qemu_get_thread_id();
667 #endif
668 *penv = env;
669 #if defined(CONFIG_USER_ONLY)
670 cpu_list_unlock();
671 #endif
672 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
673 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
674 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
675 cpu_save, cpu_load, env);
676 #endif
677 }
678
679 /* Allocate a new translation block. Flush the translation buffer if
680    there are too many translation blocks or too much generated code. */
681 static TranslationBlock *tb_alloc(target_ulong pc)
682 {
683 TranslationBlock *tb;
684
685 if (nb_tbs >= code_gen_max_blocks ||
686 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
687 return NULL;
688 tb = &tbs[nb_tbs++];
689 tb->pc = pc;
690 tb->cflags = 0;
691 return tb;
692 }
693
694 void tb_free(TranslationBlock *tb)
695 {
696     /* In practice this is mostly used for single-use temporary TBs.
697        Ignore the hard cases and just back up if this TB happens to
698        be the last one generated. */
699 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
700 code_gen_ptr = tb->tc_ptr;
701 nb_tbs--;
702 }
703 }
704
705 static inline void invalidate_page_bitmap(PageDesc *p)
706 {
707 if (p->code_bitmap) {
708 g_free(p->code_bitmap);
709 p->code_bitmap = NULL;
710 }
711 p->code_write_count = 0;
712 }
713
714 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
715
716 static void page_flush_tb_1 (int level, void **lp)
717 {
718 int i;
719
720 if (*lp == NULL) {
721 return;
722 }
723 if (level == 0) {
724 PageDesc *pd = *lp;
725 for (i = 0; i < L2_SIZE; ++i) {
726 pd[i].first_tb = NULL;
727 invalidate_page_bitmap(pd + i);
728 }
729 } else {
730 void **pp = *lp;
731 for (i = 0; i < L2_SIZE; ++i) {
732 page_flush_tb_1 (level - 1, pp + i);
733 }
734 }
735 }
736
737 static void page_flush_tb(void)
738 {
739 int i;
740 for (i = 0; i < V_L1_SIZE; i++) {
741 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
742 }
743 }
744
745 /* flush all the translation blocks */
746 /* XXX: tb_flush is currently not thread safe */
747 void tb_flush(CPUState *env1)
748 {
749 CPUState *env;
750 #if defined(DEBUG_FLUSH)
751 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
752 (unsigned long)(code_gen_ptr - code_gen_buffer),
753 nb_tbs, nb_tbs > 0 ?
754 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
755 #endif
756 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
757 cpu_abort(env1, "Internal error: code buffer overflow\n");
758
759 nb_tbs = 0;
760
761 for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
763 }
764
765 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
766 page_flush_tb();
767
768 code_gen_ptr = code_gen_buffer;
769 /* XXX: flush processor icache at this point if cache flush is
770 expensive */
771 tb_flush_count++;
772 }
773
774 #ifdef DEBUG_TB_CHECK
775
776 static void tb_invalidate_check(target_ulong address)
777 {
778 TranslationBlock *tb;
779 int i;
780 address &= TARGET_PAGE_MASK;
781 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
782 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
783 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
784 address >= tb->pc + tb->size)) {
785 printf("ERROR invalidate: address=" TARGET_FMT_lx
786 " PC=%08lx size=%04x\n",
787 address, (long)tb->pc, tb->size);
788 }
789 }
790 }
791 }
792
793 /* verify that all the pages have correct rights for code */
794 static void tb_page_check(void)
795 {
796 TranslationBlock *tb;
797 int i, flags1, flags2;
798
799 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
800 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
801 flags1 = page_get_flags(tb->pc);
802 flags2 = page_get_flags(tb->pc + tb->size - 1);
803 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
804 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
805 (long)tb->pc, tb->size, flags1, flags2);
806 }
807 }
808 }
809 }
810
811 #endif
812
813 /* invalidate one TB */
814 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
815 int next_offset)
816 {
817 TranslationBlock *tb1;
818 for(;;) {
819 tb1 = *ptb;
820 if (tb1 == tb) {
821 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
822 break;
823 }
824 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
825 }
826 }
827
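/* TB list pointers are tagged: p->first_tb and tb->page_next[] carry the
   page slot (0 or 1) in the low two bits of the pointer, and the jump
   lists (tb->jmp_first / tb->jmp_next[]) use the same trick with the jump
   slot number, where the value 2 marks the end of the circular list.
   Hence the (long)tb & 3 / & ~3 arithmetic below. */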
828 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
829 {
830 TranslationBlock *tb1;
831 unsigned int n1;
832
833 for(;;) {
834 tb1 = *ptb;
835 n1 = (long)tb1 & 3;
836 tb1 = (TranslationBlock *)((long)tb1 & ~3);
837 if (tb1 == tb) {
838 *ptb = tb1->page_next[n1];
839 break;
840 }
841 ptb = &tb1->page_next[n1];
842 }
843 }
844
845 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
846 {
847 TranslationBlock *tb1, **ptb;
848 unsigned int n1;
849
850 ptb = &tb->jmp_next[n];
851 tb1 = *ptb;
852 if (tb1) {
853 /* find tb(n) in circular list */
854 for(;;) {
855 tb1 = *ptb;
856 n1 = (long)tb1 & 3;
857 tb1 = (TranslationBlock *)((long)tb1 & ~3);
858 if (n1 == n && tb1 == tb)
859 break;
860 if (n1 == 2) {
861 ptb = &tb1->jmp_first;
862 } else {
863 ptb = &tb1->jmp_next[n1];
864 }
865 }
866 /* now we can suppress tb(n) from the list */
867 *ptb = tb->jmp_next[n];
868
869 tb->jmp_next[n] = NULL;
870 }
871 }
872
873 /* reset the jump entry 'n' of a TB so that it is not chained to
874 another TB */
875 static inline void tb_reset_jump(TranslationBlock *tb, int n)
876 {
877 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
878 }
879
880 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
881 {
882 CPUState *env;
883 PageDesc *p;
884 unsigned int h, n1;
885 tb_page_addr_t phys_pc;
886 TranslationBlock *tb1, *tb2;
887
888 /* remove the TB from the hash list */
889 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
890 h = tb_phys_hash_func(phys_pc);
891 tb_remove(&tb_phys_hash[h], tb,
892 offsetof(TranslationBlock, phys_hash_next));
893
894 /* remove the TB from the page list */
895 if (tb->page_addr[0] != page_addr) {
896 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
897 tb_page_remove(&p->first_tb, tb);
898 invalidate_page_bitmap(p);
899 }
900 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
901 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
902 tb_page_remove(&p->first_tb, tb);
903 invalidate_page_bitmap(p);
904 }
905
906 tb_invalidated_flag = 1;
907
908 /* remove the TB from the hash list */
909 h = tb_jmp_cache_hash_func(tb->pc);
910 for(env = first_cpu; env != NULL; env = env->next_cpu) {
911 if (env->tb_jmp_cache[h] == tb)
912 env->tb_jmp_cache[h] = NULL;
913 }
914
915 /* suppress this TB from the two jump lists */
916 tb_jmp_remove(tb, 0);
917 tb_jmp_remove(tb, 1);
918
919 /* suppress any remaining jumps to this TB */
920 tb1 = tb->jmp_first;
921 for(;;) {
922 n1 = (long)tb1 & 3;
923 if (n1 == 2)
924 break;
925 tb1 = (TranslationBlock *)((long)tb1 & ~3);
926 tb2 = tb1->jmp_next[n1];
927 tb_reset_jump(tb1, n1);
928 tb1->jmp_next[n1] = NULL;
929 tb1 = tb2;
930 }
931 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
932
933 tb_phys_invalidate_count++;
934 }
935
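/* Set bits [start, start + len) in the bitmap 'tab', handling the partial
   leading and trailing bytes. */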
936 static inline void set_bits(uint8_t *tab, int start, int len)
937 {
938 int end, mask, end1;
939
940 end = start + len;
941 tab += start >> 3;
942 mask = 0xff << (start & 7);
943 if ((start & ~7) == (end & ~7)) {
944 if (start < end) {
945 mask &= ~(0xff << (end & 7));
946 *tab |= mask;
947 }
948 } else {
949 *tab++ |= mask;
950 start = (start + 8) & ~7;
951 end1 = end & ~7;
952 while (start < end1) {
953 *tab++ = 0xff;
954 start += 8;
955 }
956 if (start < end) {
957 mask = ~(0xff << (end & 7));
958 *tab |= mask;
959 }
960 }
961 }
962
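/* (Re)build the SMC code bitmap for a page: mark every byte range covered
   by a TB that starts or ends in this page, so that small writes can be
   checked against it in tb_invalidate_phys_page_fast(). */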
963 static void build_page_bitmap(PageDesc *p)
964 {
965 int n, tb_start, tb_end;
966 TranslationBlock *tb;
967
968 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
969
970 tb = p->first_tb;
971 while (tb != NULL) {
972 n = (long)tb & 3;
973 tb = (TranslationBlock *)((long)tb & ~3);
974 /* NOTE: this is subtle as a TB may span two physical pages */
975 if (n == 0) {
976 /* NOTE: tb_end may be after the end of the page, but
977 it is not a problem */
978 tb_start = tb->pc & ~TARGET_PAGE_MASK;
979 tb_end = tb_start + tb->size;
980 if (tb_end > TARGET_PAGE_SIZE)
981 tb_end = TARGET_PAGE_SIZE;
982 } else {
983 tb_start = 0;
984 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
985 }
986 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
987 tb = tb->page_next[n];
988 }
989 }
990
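/* Translate a new TB for (pc, cs_base, flags).  If the TB cache is full,
   flush it completely and retry; the second tb_alloc() cannot fail.  The
   resulting TB is linked into the physical page(s) it covers. */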
991 TranslationBlock *tb_gen_code(CPUState *env,
992 target_ulong pc, target_ulong cs_base,
993 int flags, int cflags)
994 {
995 TranslationBlock *tb;
996 uint8_t *tc_ptr;
997 tb_page_addr_t phys_pc, phys_page2;
998 target_ulong virt_page2;
999 int code_gen_size;
1000
1001 phys_pc = get_page_addr_code(env, pc);
1002 tb = tb_alloc(pc);
1003 if (!tb) {
1004 /* flush must be done */
1005 tb_flush(env);
1006 /* cannot fail at this point */
1007 tb = tb_alloc(pc);
1008 /* Don't forget to invalidate previous TB info. */
1009 tb_invalidated_flag = 1;
1010 }
1011 tc_ptr = code_gen_ptr;
1012 tb->tc_ptr = tc_ptr;
1013 tb->cs_base = cs_base;
1014 tb->flags = flags;
1015 tb->cflags = cflags;
1016 cpu_gen_code(env, tb, &code_gen_size);
1017 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1018
1019 /* check next page if needed */
1020 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1021 phys_page2 = -1;
1022 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1023 phys_page2 = get_page_addr_code(env, virt_page2);
1024 }
1025 tb_link_page(tb, phys_pc, phys_page2);
1026 return tb;
1027 }
1028
1029 /* invalidate all TBs which intersect with the target physical page
1030    range [start, end). NOTE: start and end must refer to
1031 the same physical page. 'is_cpu_write_access' should be true if called
1032 from a real cpu write access: the virtual CPU will exit the current
1033 TB if code is modified inside this TB. */
1034 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1035 int is_cpu_write_access)
1036 {
1037 TranslationBlock *tb, *tb_next, *saved_tb;
1038 CPUState *env = cpu_single_env;
1039 tb_page_addr_t tb_start, tb_end;
1040 PageDesc *p;
1041 int n;
1042 #ifdef TARGET_HAS_PRECISE_SMC
1043 int current_tb_not_found = is_cpu_write_access;
1044 TranslationBlock *current_tb = NULL;
1045 int current_tb_modified = 0;
1046 target_ulong current_pc = 0;
1047 target_ulong current_cs_base = 0;
1048 int current_flags = 0;
1049 #endif /* TARGET_HAS_PRECISE_SMC */
1050
1051 p = page_find(start >> TARGET_PAGE_BITS);
1052 if (!p)
1053 return;
1054 if (!p->code_bitmap &&
1055 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1056 is_cpu_write_access) {
1057 /* build code bitmap */
1058 build_page_bitmap(p);
1059 }
1060
1061     /* we remove all the TBs in the range [start, end) */
1062 /* XXX: see if in some cases it could be faster to invalidate all the code */
1063 tb = p->first_tb;
1064 while (tb != NULL) {
1065 n = (long)tb & 3;
1066 tb = (TranslationBlock *)((long)tb & ~3);
1067 tb_next = tb->page_next[n];
1068 /* NOTE: this is subtle as a TB may span two physical pages */
1069 if (n == 0) {
1070 /* NOTE: tb_end may be after the end of the page, but
1071 it is not a problem */
1072 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1073 tb_end = tb_start + tb->size;
1074 } else {
1075 tb_start = tb->page_addr[1];
1076 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1077 }
1078 if (!(tb_end <= start || tb_start >= end)) {
1079 #ifdef TARGET_HAS_PRECISE_SMC
1080 if (current_tb_not_found) {
1081 current_tb_not_found = 0;
1082 current_tb = NULL;
1083 if (env->mem_io_pc) {
1084 /* now we have a real cpu fault */
1085 current_tb = tb_find_pc(env->mem_io_pc);
1086 }
1087 }
1088 if (current_tb == tb &&
1089 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1090 /* If we are modifying the current TB, we must stop
1091 its execution. We could be more precise by checking
1092 that the modification is after the current PC, but it
1093 would require a specialized function to partially
1094 restore the CPU state */
1095
1096 current_tb_modified = 1;
1097 cpu_restore_state(current_tb, env, env->mem_io_pc);
1098 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1099 &current_flags);
1100 }
1101 #endif /* TARGET_HAS_PRECISE_SMC */
1102 /* we need to do that to handle the case where a signal
1103 occurs while doing tb_phys_invalidate() */
1104 saved_tb = NULL;
1105 if (env) {
1106 saved_tb = env->current_tb;
1107 env->current_tb = NULL;
1108 }
1109 tb_phys_invalidate(tb, -1);
1110 if (env) {
1111 env->current_tb = saved_tb;
1112 if (env->interrupt_request && env->current_tb)
1113 cpu_interrupt(env, env->interrupt_request);
1114 }
1115 }
1116 tb = tb_next;
1117 }
1118 #if !defined(CONFIG_USER_ONLY)
1119 /* if no code remaining, no need to continue to use slow writes */
1120 if (!p->first_tb) {
1121 invalidate_page_bitmap(p);
1122 if (is_cpu_write_access) {
1123 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1124 }
1125 }
1126 #endif
1127 #ifdef TARGET_HAS_PRECISE_SMC
1128 if (current_tb_modified) {
1129         /* we generate a block containing just the instruction
1130            modifying the memory. This ensures that the block cannot
1131            modify itself */
1132 env->current_tb = NULL;
1133 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1134 cpu_resume_from_signal(env, NULL);
1135 }
1136 #endif
1137 }
1138
1139 /* len must be <= 8 and start must be a multiple of len */
1140 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1141 {
1142 PageDesc *p;
1143 int offset, b;
1144 #if 0
1145 if (1) {
1146 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1147 cpu_single_env->mem_io_vaddr, len,
1148 cpu_single_env->eip,
1149 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1150 }
1151 #endif
1152 p = page_find(start >> TARGET_PAGE_BITS);
1153 if (!p)
1154 return;
1155 if (p->code_bitmap) {
1156 offset = start & ~TARGET_PAGE_MASK;
1157 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1158 if (b & ((1 << len) - 1))
1159 goto do_invalidate;
1160 } else {
1161 do_invalidate:
1162 tb_invalidate_phys_page_range(start, start + len, 1);
1163 }
1164 }
1165
1166 #if !defined(CONFIG_SOFTMMU)
1167 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1168 unsigned long pc, void *puc)
1169 {
1170 TranslationBlock *tb;
1171 PageDesc *p;
1172 int n;
1173 #ifdef TARGET_HAS_PRECISE_SMC
1174 TranslationBlock *current_tb = NULL;
1175 CPUState *env = cpu_single_env;
1176 int current_tb_modified = 0;
1177 target_ulong current_pc = 0;
1178 target_ulong current_cs_base = 0;
1179 int current_flags = 0;
1180 #endif
1181
1182 addr &= TARGET_PAGE_MASK;
1183 p = page_find(addr >> TARGET_PAGE_BITS);
1184 if (!p)
1185 return;
1186 tb = p->first_tb;
1187 #ifdef TARGET_HAS_PRECISE_SMC
1188 if (tb && pc != 0) {
1189 current_tb = tb_find_pc(pc);
1190 }
1191 #endif
1192 while (tb != NULL) {
1193 n = (long)tb & 3;
1194 tb = (TranslationBlock *)((long)tb & ~3);
1195 #ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb == tb &&
1197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
1203
1204 current_tb_modified = 1;
1205 cpu_restore_state(current_tb, env, pc);
1206 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1207 &current_flags);
1208 }
1209 #endif /* TARGET_HAS_PRECISE_SMC */
1210 tb_phys_invalidate(tb, addr);
1211 tb = tb->page_next[n];
1212 }
1213 p->first_tb = NULL;
1214 #ifdef TARGET_HAS_PRECISE_SMC
1215 if (current_tb_modified) {
1216         /* we generate a block containing just the instruction
1217            modifying the memory. This ensures that the block cannot
1218            modify itself */
1219 env->current_tb = NULL;
1220 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1221 cpu_resume_from_signal(env, puc);
1222 }
1223 #endif
1224 }
1225 #endif
1226
1227 /* add the tb in the target page and protect it if necessary */
1228 static inline void tb_alloc_page(TranslationBlock *tb,
1229 unsigned int n, tb_page_addr_t page_addr)
1230 {
1231 PageDesc *p;
1232 #ifndef CONFIG_USER_ONLY
1233 bool page_already_protected;
1234 #endif
1235
1236 tb->page_addr[n] = page_addr;
1237 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1238 tb->page_next[n] = p->first_tb;
1239 #ifndef CONFIG_USER_ONLY
1240 page_already_protected = p->first_tb != NULL;
1241 #endif
1242 p->first_tb = (TranslationBlock *)((long)tb | n);
1243 invalidate_page_bitmap(p);
1244
1245 #if defined(TARGET_HAS_SMC) || 1
1246
1247 #if defined(CONFIG_USER_ONLY)
1248 if (p->flags & PAGE_WRITE) {
1249 target_ulong addr;
1250 PageDesc *p2;
1251 int prot;
1252
1253 /* force the host page as non writable (writes will have a
1254 page fault + mprotect overhead) */
1255 page_addr &= qemu_host_page_mask;
1256 prot = 0;
1257 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1258 addr += TARGET_PAGE_SIZE) {
1259
1260 p2 = page_find (addr >> TARGET_PAGE_BITS);
1261 if (!p2)
1262 continue;
1263 prot |= p2->flags;
1264 p2->flags &= ~PAGE_WRITE;
1265 }
1266 mprotect(g2h(page_addr), qemu_host_page_size,
1267 (prot & PAGE_BITS) & ~PAGE_WRITE);
1268 #ifdef DEBUG_TB_INVALIDATE
1269 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1270 page_addr);
1271 #endif
1272 }
1273 #else
1274 /* if some code is already present, then the pages are already
1275 protected. So we handle the case where only the first TB is
1276 allocated in a physical page */
1277 if (!page_already_protected) {
1278 tlb_protect_code(page_addr);
1279 }
1280 #endif
1281
1282 #endif /* TARGET_HAS_SMC */
1283 }
1284
1285 /* add a new TB and link it to the physical page tables. phys_page2 is
1286 (-1) to indicate that only one page contains the TB. */
1287 void tb_link_page(TranslationBlock *tb,
1288 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1289 {
1290 unsigned int h;
1291 TranslationBlock **ptb;
1292
1293 /* Grab the mmap lock to stop another thread invalidating this TB
1294 before we are done. */
1295 mmap_lock();
1296 /* add in the physical hash table */
1297 h = tb_phys_hash_func(phys_pc);
1298 ptb = &tb_phys_hash[h];
1299 tb->phys_hash_next = *ptb;
1300 *ptb = tb;
1301
1302 /* add in the page list */
1303 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1304 if (phys_page2 != -1)
1305 tb_alloc_page(tb, 1, phys_page2);
1306 else
1307 tb->page_addr[1] = -1;
1308
1309 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1310 tb->jmp_next[0] = NULL;
1311 tb->jmp_next[1] = NULL;
1312
1313 /* init original jump addresses */
1314 if (tb->tb_next_offset[0] != 0xffff)
1315 tb_reset_jump(tb, 0);
1316 if (tb->tb_next_offset[1] != 0xffff)
1317 tb_reset_jump(tb, 1);
1318
1319 #ifdef DEBUG_TB_CHECK
1320 tb_page_check();
1321 #endif
1322 mmap_unlock();
1323 }
1324
1325 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 tb[1].tc_ptr. Return NULL if not found */
1327 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328 {
1329 int m_min, m_max, m;
1330 unsigned long v;
1331 TranslationBlock *tb;
1332
1333 if (nb_tbs <= 0)
1334 return NULL;
1335 if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 tc_ptr >= (unsigned long)code_gen_ptr)
1337 return NULL;
1338 /* binary search (cf Knuth) */
1339 m_min = 0;
1340 m_max = nb_tbs - 1;
1341 while (m_min <= m_max) {
1342 m = (m_min + m_max) >> 1;
1343 tb = &tbs[m];
1344 v = (unsigned long)tb->tc_ptr;
1345 if (v == tc_ptr)
1346 return tb;
1347 else if (tc_ptr < v) {
1348 m_max = m - 1;
1349 } else {
1350 m_min = m + 1;
1351 }
1352 }
1353 return &tbs[m_max];
1354 }
1355
1356 static void tb_reset_jump_recursive(TranslationBlock *tb);
1357
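/* Undo the chaining of jump slot 'n' of 'tb': find the TB it currently
   jumps to, unlink 'tb' from that TB's incoming-jump list, patch the
   generated code back to its unchained target, and recurse into the TB
   we used to jump to. */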
1358 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359 {
1360 TranslationBlock *tb1, *tb_next, **ptb;
1361 unsigned int n1;
1362
1363 tb1 = tb->jmp_next[n];
1364 if (tb1 != NULL) {
1365 /* find head of list */
1366 for(;;) {
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == 2)
1370 break;
1371 tb1 = tb1->jmp_next[n1];
1372 }
1373         /* we are now sure that tb jumps to tb1 */
1374 tb_next = tb1;
1375
1376 /* remove tb from the jmp_first list */
1377 ptb = &tb_next->jmp_first;
1378 for(;;) {
1379 tb1 = *ptb;
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == n && tb1 == tb)
1383 break;
1384 ptb = &tb1->jmp_next[n1];
1385 }
1386 *ptb = tb->jmp_next[n];
1387 tb->jmp_next[n] = NULL;
1388
1389 /* suppress the jump to next tb in generated code */
1390 tb_reset_jump(tb, n);
1391
1392 /* suppress jumps in the tb on which we could have jumped */
1393 tb_reset_jump_recursive(tb_next);
1394 }
1395 }
1396
1397 static void tb_reset_jump_recursive(TranslationBlock *tb)
1398 {
1399 tb_reset_jump_recursive2(tb, 0);
1400 tb_reset_jump_recursive2(tb, 1);
1401 }
1402
1403 #if defined(TARGET_HAS_ICE)
1404 #if defined(CONFIG_USER_ONLY)
1405 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406 {
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408 }
1409 #else
1410 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411 {
1412 target_phys_addr_t addr;
1413 target_ulong pd;
1414 ram_addr_t ram_addr;
1415 PhysPageDesc p;
1416
1417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1419 pd = p.phys_offset;
1420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1422 }
1423 #endif
1424 #endif /* TARGET_HAS_ICE */
1425
1426 #if defined(CONFIG_USER_ONLY)
1427 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428
1429 {
1430 }
1431
1432 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434 {
1435 return -ENOSYS;
1436 }
1437 #else
1438 /* Add a watchpoint. */
1439 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
1441 {
1442 target_ulong len_mask = ~(len - 1);
1443 CPUWatchpoint *wp;
1444
1445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
1451 wp = g_malloc(sizeof(*wp));
1452
1453 wp->vaddr = addr;
1454 wp->len_mask = len_mask;
1455 wp->flags = flags;
1456
1457 /* keep all GDB-injected watchpoints in front */
1458 if (flags & BP_GDB)
1459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1460 else
1461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1462
1463 tlb_flush_page(env, addr);
1464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
1468 }
1469
1470 /* Remove a specific watchpoint. */
1471 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
1473 {
1474 target_ulong len_mask = ~(len - 1);
1475 CPUWatchpoint *wp;
1476
1477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1478 if (addr == wp->vaddr && len_mask == wp->len_mask
1479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1480 cpu_watchpoint_remove_by_ref(env, wp);
1481 return 0;
1482 }
1483 }
1484 return -ENOENT;
1485 }
1486
1487 /* Remove a specific watchpoint by reference. */
1488 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489 {
1490 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1491
1492 tlb_flush_page(env, watchpoint->vaddr);
1493
1494 g_free(watchpoint);
1495 }
1496
1497 /* Remove all matching watchpoints. */
1498 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499 {
1500 CPUWatchpoint *wp, *next;
1501
1502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
1505 }
1506 }
1507 #endif
1508
1509 /* Add a breakpoint. */
1510 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 CPUBreakpoint **breakpoint)
1512 {
1513 #if defined(TARGET_HAS_ICE)
1514 CPUBreakpoint *bp;
1515
1516 bp = g_malloc(sizeof(*bp));
1517
1518 bp->pc = pc;
1519 bp->flags = flags;
1520
1521 /* keep all GDB-injected breakpoints in front */
1522 if (flags & BP_GDB)
1523 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1524 else
1525 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1526
1527 breakpoint_invalidate(env, pc);
1528
1529 if (breakpoint)
1530 *breakpoint = bp;
1531 return 0;
1532 #else
1533 return -ENOSYS;
1534 #endif
1535 }
1536
1537 /* Remove a specific breakpoint. */
1538 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539 {
1540 #if defined(TARGET_HAS_ICE)
1541 CPUBreakpoint *bp;
1542
1543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
1546 return 0;
1547 }
1548 }
1549 return -ENOENT;
1550 #else
1551 return -ENOSYS;
1552 #endif
1553 }
1554
1555 /* Remove a specific breakpoint by reference. */
1556 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1557 {
1558 #if defined(TARGET_HAS_ICE)
1559 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1560
1561 breakpoint_invalidate(env, breakpoint->pc);
1562
1563 g_free(breakpoint);
1564 #endif
1565 }
1566
1567 /* Remove all matching breakpoints. */
1568 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569 {
1570 #if defined(TARGET_HAS_ICE)
1571 CPUBreakpoint *bp, *next;
1572
1573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
1576 }
1577 #endif
1578 }
1579
1580 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1581 CPU loop after each instruction */
1582 void cpu_single_step(CPUState *env, int enabled)
1583 {
1584 #if defined(TARGET_HAS_ICE)
1585 if (env->singlestep_enabled != enabled) {
1586 env->singlestep_enabled = enabled;
1587 if (kvm_enabled())
1588 kvm_update_guest_debug(env, 0);
1589 else {
1590 /* must flush all the translated code to avoid inconsistencies */
1591 /* XXX: only flush what is necessary */
1592 tb_flush(env);
1593 }
1594 }
1595 #endif
1596 }
1597
1598 /* enable or disable low-level logging */
1599 void cpu_set_log(int log_flags)
1600 {
1601 loglevel = log_flags;
1602 if (loglevel && !logfile) {
1603 logfile = fopen(logfilename, log_append ? "a" : "w");
1604 if (!logfile) {
1605 perror(logfilename);
1606 _exit(1);
1607 }
1608 #if !defined(CONFIG_SOFTMMU)
1609         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1610 {
1611 static char logfile_buf[4096];
1612 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 }
1614 #elif defined(_WIN32)
1615 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 setvbuf(logfile, NULL, _IONBF, 0);
1617 #else
1618 setvbuf(logfile, NULL, _IOLBF, 0);
1619 #endif
1620 log_append = 1;
1621 }
1622 if (!loglevel && logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
1625 }
1626 }
1627
1628 void cpu_set_log_filename(const char *filename)
1629 {
1630 logfilename = strdup(filename);
1631 if (logfile) {
1632 fclose(logfile);
1633 logfile = NULL;
1634 }
1635 cpu_set_log(loglevel);
1636 }
1637
1638 static void cpu_unlink_tb(CPUState *env)
1639 {
1640 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1641 problem and hope the cpu will stop of its own accord. For userspace
1642 emulation this often isn't actually as bad as it sounds. Often
1643 signals are used primarily to interrupt blocking syscalls. */
1644 TranslationBlock *tb;
1645 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1646
1647 spin_lock(&interrupt_lock);
1648 tb = env->current_tb;
1649 /* if the cpu is currently executing code, we must unlink it and
1650 all the potentially executing TB */
1651 if (tb) {
1652 env->current_tb = NULL;
1653 tb_reset_jump_recursive(tb);
1654 }
1655 spin_unlock(&interrupt_lock);
1656 }
1657
1658 #ifndef CONFIG_USER_ONLY
1659 /* mask must never be zero, except for A20 change call */
1660 static void tcg_handle_interrupt(CPUState *env, int mask)
1661 {
1662 int old_mask;
1663
1664 old_mask = env->interrupt_request;
1665 env->interrupt_request |= mask;
1666
1667 /*
1668 * If called from iothread context, wake the target cpu in
1669      * case it's halted.
1670 */
1671 if (!qemu_cpu_is_self(env)) {
1672 qemu_cpu_kick(env);
1673 return;
1674 }
1675
1676 if (use_icount) {
1677 env->icount_decr.u16.high = 0xffff;
1678 if (!can_do_io(env)
1679 && (mask & ~old_mask) != 0) {
1680 cpu_abort(env, "Raised interrupt while not in I/O function");
1681 }
1682 } else {
1683 cpu_unlink_tb(env);
1684 }
1685 }
1686
1687 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688
1689 #else /* CONFIG_USER_ONLY */
1690
1691 void cpu_interrupt(CPUState *env, int mask)
1692 {
1693 env->interrupt_request |= mask;
1694 cpu_unlink_tb(env);
1695 }
1696 #endif /* CONFIG_USER_ONLY */
1697
1698 void cpu_reset_interrupt(CPUState *env, int mask)
1699 {
1700 env->interrupt_request &= ~mask;
1701 }
1702
1703 void cpu_exit(CPUState *env)
1704 {
1705 env->exit_request = 1;
1706 cpu_unlink_tb(env);
1707 }
1708
1709 const CPULogItem cpu_log_items[] = {
1710 { CPU_LOG_TB_OUT_ASM, "out_asm",
1711 "show generated host assembly code for each compiled TB" },
1712 { CPU_LOG_TB_IN_ASM, "in_asm",
1713 "show target assembly code for each compiled TB" },
1714 { CPU_LOG_TB_OP, "op",
1715 "show micro ops for each compiled TB" },
1716 { CPU_LOG_TB_OP_OPT, "op_opt",
1717 "show micro ops "
1718 #ifdef TARGET_I386
1719 "before eflags optimization and "
1720 #endif
1721 "after liveness analysis" },
1722 { CPU_LOG_INT, "int",
1723 "show interrupts/exceptions in short format" },
1724 { CPU_LOG_EXEC, "exec",
1725 "show trace before each executed TB (lots of logs)" },
1726 { CPU_LOG_TB_CPU, "cpu",
1727 "show CPU state before block translation" },
1728 #ifdef TARGET_I386
1729 { CPU_LOG_PCALL, "pcall",
1730 "show protected mode far calls/returns/exceptions" },
1731 { CPU_LOG_RESET, "cpu_reset",
1732 "show CPU state before CPU resets" },
1733 #endif
1734 #ifdef DEBUG_IOPORT
1735 { CPU_LOG_IOPORT, "ioport",
1736 "show all i/o ports accesses" },
1737 #endif
1738 { 0, NULL, NULL },
1739 };
1740
1741 static int cmp1(const char *s1, int n, const char *s2)
1742 {
1743 if (strlen(s2) != n)
1744 return 0;
1745 return memcmp(s1, s2, n) == 0;
1746 }
1747
1748 /* takes a comma-separated list of log masks. Returns 0 on error. */
1749 int cpu_str_to_log_mask(const char *str)
1750 {
1751 const CPULogItem *item;
1752 int mask;
1753 const char *p, *p1;
1754
1755 p = str;
1756 mask = 0;
1757 for(;;) {
1758 p1 = strchr(p, ',');
1759 if (!p1)
1760 p1 = p + strlen(p);
1761 if(cmp1(p,p1-p,"all")) {
1762 for(item = cpu_log_items; item->mask != 0; item++) {
1763 mask |= item->mask;
1764 }
1765 } else {
1766 for(item = cpu_log_items; item->mask != 0; item++) {
1767 if (cmp1(p, p1 - p, item->name))
1768 goto found;
1769 }
1770 return 0;
1771 }
1772 found:
1773 mask |= item->mask;
1774 if (*p1 != ',')
1775 break;
1776 p = p1 + 1;
1777 }
1778 return mask;
1779 }
1780
1781 void cpu_abort(CPUState *env, const char *fmt, ...)
1782 {
1783 va_list ap;
1784 va_list ap2;
1785
1786 va_start(ap, fmt);
1787 va_copy(ap2, ap);
1788 fprintf(stderr, "qemu: fatal: ");
1789 vfprintf(stderr, fmt, ap);
1790 fprintf(stderr, "\n");
1791 #ifdef TARGET_I386
1792 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793 #else
1794 cpu_dump_state(env, stderr, fprintf, 0);
1795 #endif
1796 if (qemu_log_enabled()) {
1797 qemu_log("qemu: fatal: ");
1798 qemu_log_vprintf(fmt, ap2);
1799 qemu_log("\n");
1800 #ifdef TARGET_I386
1801 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1802 #else
1803 log_cpu_state(env, 0);
1804 #endif
1805 qemu_log_flush();
1806 qemu_log_close();
1807 }
1808 va_end(ap2);
1809 va_end(ap);
1810 #if defined(CONFIG_USER_ONLY)
1811 {
1812 struct sigaction act;
1813 sigfillset(&act.sa_mask);
1814 act.sa_handler = SIG_DFL;
1815 sigaction(SIGABRT, &act, NULL);
1816 }
1817 #endif
1818 abort();
1819 }
1820
1821 CPUState *cpu_copy(CPUState *env)
1822 {
1823 CPUState *new_env = cpu_init(env->cpu_model_str);
1824 CPUState *next_cpu = new_env->next_cpu;
1825 int cpu_index = new_env->cpu_index;
1826 #if defined(TARGET_HAS_ICE)
1827 CPUBreakpoint *bp;
1828 CPUWatchpoint *wp;
1829 #endif
1830
1831 memcpy(new_env, env, sizeof(CPUState));
1832
1833 /* Preserve chaining and index. */
1834 new_env->next_cpu = next_cpu;
1835 new_env->cpu_index = cpu_index;
1836
1837 /* Clone all break/watchpoints.
1838 Note: Once we support ptrace with hw-debug register access, make sure
1839 BP_CPU break/watchpoints are handled correctly on clone. */
1840 QTAILQ_INIT(&env->breakpoints);
1841 QTAILQ_INIT(&env->watchpoints);
1842 #if defined(TARGET_HAS_ICE)
1843 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1844 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 }
1846 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1847 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 wp->flags, NULL);
1849 }
1850 #endif
1851
1852 return new_env;
1853 }
1854
1855 #if !defined(CONFIG_USER_ONLY)
1856
1857 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858 {
1859 unsigned int i;
1860
1861     /* Discard jump cache entries for any tb which might
1862        overlap the flushed page. */
1863 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 memset (&env->tb_jmp_cache[i], 0,
1865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1866
1867 i = tb_jmp_cache_hash_page(addr);
1868 memset (&env->tb_jmp_cache[i], 0,
1869 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1870 }
1871
1872 static CPUTLBEntry s_cputlb_empty_entry = {
1873 .addr_read = -1,
1874 .addr_write = -1,
1875 .addr_code = -1,
1876 .addend = -1,
1877 };
1878
1879 /* NOTE: if flush_global is true, also flush global entries (not
1880 implemented yet) */
1881 void tlb_flush(CPUState *env, int flush_global)
1882 {
1883 int i;
1884
1885 #if defined(DEBUG_TLB)
1886 printf("tlb_flush:\n");
1887 #endif
1888 /* must reset current TB so that interrupts cannot modify the
1889 links while we are modifying them */
1890 env->current_tb = NULL;
1891
1892 for(i = 0; i < CPU_TLB_SIZE; i++) {
1893 int mmu_idx;
1894 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1895 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1896 }
1897 }
1898
1899 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1900
1901 env->tlb_flush_addr = -1;
1902 env->tlb_flush_mask = 0;
1903 tlb_flush_count++;
1904 }
1905
1906 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1907 {
1908 if (addr == (tlb_entry->addr_read &
1909 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1910 addr == (tlb_entry->addr_write &
1911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1912 addr == (tlb_entry->addr_code &
1913 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1914 *tlb_entry = s_cputlb_empty_entry;
1915 }
1916 }
1917
1918 void tlb_flush_page(CPUState *env, target_ulong addr)
1919 {
1920 int i;
1921 int mmu_idx;
1922
1923 #if defined(DEBUG_TLB)
1924 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1925 #endif
1926 /* Check if we need to flush due to large pages. */
1927 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1928 #if defined(DEBUG_TLB)
1929 printf("tlb_flush_page: forced full flush ("
1930 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1931 env->tlb_flush_addr, env->tlb_flush_mask);
1932 #endif
1933 tlb_flush(env, 1);
1934 return;
1935 }
1936 /* must reset current TB so that interrupts cannot modify the
1937 links while we are modifying them */
1938 env->current_tb = NULL;
1939
1940 addr &= TARGET_PAGE_MASK;
1941 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1942 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1943 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1944
1945 tlb_flush_jmp_cache(env, addr);
1946 }
1947
1948 /* update the TLBs so that writes to code in the virtual page 'addr'
1949 can be detected */
1950 static void tlb_protect_code(ram_addr_t ram_addr)
1951 {
1952 cpu_physical_memory_reset_dirty(ram_addr,
1953 ram_addr + TARGET_PAGE_SIZE,
1954 CODE_DIRTY_FLAG);
1955 }
1956
1957 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1958 tested for self modifying code */
1959 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1960 target_ulong vaddr)
1961 {
1962 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1963 }
1964
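/* If this TLB entry maps normal RAM inside [start, start + length), mark
   it TLB_NOTDIRTY so the next write takes the slow path and the dirty
   bits are updated again. */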
1965 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1966 unsigned long start, unsigned long length)
1967 {
1968 unsigned long addr;
1969 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
1970 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1971 if ((addr - start) < length) {
1972 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1973 }
1974 }
1975 }
1976
1977 /* Note: start and end must be within the same ram block. */
1978 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1979 int dirty_flags)
1980 {
1981 CPUState *env;
1982 unsigned long length, start1;
1983 int i;
1984
1985 start &= TARGET_PAGE_MASK;
1986 end = TARGET_PAGE_ALIGN(end);
1987
1988 length = end - start;
1989 if (length == 0)
1990 return;
1991 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1992
1993 /* we modify the TLB cache so that the dirty bit will be set again
1994 when accessing the range */
1995 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1996 /* Check that we don't span multiple blocks - this breaks the
1997 address comparisons below. */
1998 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1999 != (end - 1) - start) {
2000 abort();
2001 }
2002
2003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2004 int mmu_idx;
2005 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2006 for(i = 0; i < CPU_TLB_SIZE; i++)
2007 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2008 start1, length);
2009 }
2010 }
2011 }
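/* Illustrative sketch: a typical consumer of the dirty bitmap, such as a
   display device, polls the flags for its RAM range and then clears the
   bits it has consumed. VGA_DIRTY_FLAG is assumed here; see the dirty-flag
   definitions in cpu-all.h for the actual names.

       for (addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
           if (cpu_physical_memory_get_dirty_flags(addr) & VGA_DIRTY_FLAG) {
               ... redraw the scanlines backed by this page ...
           }
       }
       cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
*/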
2012
2013 int cpu_physical_memory_set_dirty_tracking(int enable)
2014 {
2015 int ret = 0;
2016 in_migration = enable;
2017 return ret;
2018 }
2019
2020 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2021 {
2022 ram_addr_t ram_addr;
2023 void *p;
2024
2025 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
2026 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2027 + tlb_entry->addend);
2028 ram_addr = qemu_ram_addr_from_host_nofail(p);
2029 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2030 tlb_entry->addr_write |= TLB_NOTDIRTY;
2031 }
2032 }
2033 }
2034
2035 /* update the TLB according to the current state of the dirty bits */
2036 void cpu_tlb_update_dirty(CPUState *env)
2037 {
2038 int i;
2039 int mmu_idx;
2040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2043 }
2044 }
2045
2046 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2047 {
2048 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2049 tlb_entry->addr_write = vaddr;
2050 }
2051
2052 /* update the TLB corresponding to virtual page vaddr
2053 so that it is no longer dirty */
2054 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2055 {
2056 int i;
2057 int mmu_idx;
2058
2059 vaddr &= TARGET_PAGE_MASK;
2060 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2062 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2063 }
2064
2065 /* Our TLB does not support large pages, so remember the area covered by
2066 large pages and trigger a full TLB flush if these are invalidated. */
2067 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2068 target_ulong size)
2069 {
2070 target_ulong mask = ~(size - 1);
2071
2072 if (env->tlb_flush_addr == (target_ulong)-1) {
2073 env->tlb_flush_addr = vaddr & mask;
2074 env->tlb_flush_mask = mask;
2075 return;
2076 }
2077 /* Extend the existing region to include the new page.
2078 This is a compromise between unnecessary flushes and the cost
2079 of maintaining a full variable size TLB. */
2080 mask &= env->tlb_flush_mask;
2081 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2082 mask <<= 1;
2083 }
2084 env->tlb_flush_addr &= mask;
2085 env->tlb_flush_mask = mask;
2086 }
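/* Worked example of the mask widening above, assuming 2MB large pages:
   a first call for vaddr 0x00200000 records tlb_flush_addr = 0x00200000,
   tlb_flush_mask = 0xffe00000.  A second call for vaddr 0x00600000 shifts
   the mask left until both addresses agree under it (0xffc00000, then
   0xff800000), so the tracked window becomes 0x00000000-0x007fffff and any
   tlb_flush_page() that hits it forces a full flush. */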
2087
2088 static bool is_ram_rom(ram_addr_t pd)
2089 {
2090 pd &= ~TARGET_PAGE_MASK;
2091 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2092 }
2093
2094 static bool is_romd(ram_addr_t pd)
2095 {
2096 MemoryRegion *mr;
2097
2098 pd &= ~TARGET_PAGE_MASK;
2099 mr = io_mem_region[pd >> IO_MEM_SHIFT];
2100 return mr->rom_device && mr->readable;
2101 }
2102
2103 static bool is_ram_rom_romd(ram_addr_t pd)
2104 {
2105 return is_ram_rom(pd) || is_romd(pd);
2106 }
2107
2108 /* Add a new TLB entry. At most one entry for a given virtual address
2109 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2110 supplied size is only used by tlb_flush_page. */
2111 void tlb_set_page(CPUState *env, target_ulong vaddr,
2112 target_phys_addr_t paddr, int prot,
2113 int mmu_idx, target_ulong size)
2114 {
2115 PhysPageDesc p;
2116 unsigned long pd;
2117 unsigned int index;
2118 target_ulong address;
2119 target_ulong code_address;
2120 unsigned long addend;
2121 CPUTLBEntry *te;
2122 CPUWatchpoint *wp;
2123 target_phys_addr_t iotlb;
2124
2125 assert(size >= TARGET_PAGE_SIZE);
2126 if (size != TARGET_PAGE_SIZE) {
2127 tlb_add_large_page(env, vaddr, size);
2128 }
2129 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2130 pd = p.phys_offset;
2131 #if defined(DEBUG_TLB)
2132 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2133 " prot=%x idx=%d pd=0x%08lx\n",
2134 vaddr, paddr, prot, mmu_idx, pd);
2135 #endif
2136
2137 address = vaddr;
2138 if (!is_ram_rom_romd(pd)) {
2139 /* IO memory case (romd handled later) */
2140 address |= TLB_MMIO;
2141 }
2142 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2143 if (is_ram_rom(pd)) {
2144 /* Normal RAM. */
2145 iotlb = pd & TARGET_PAGE_MASK;
2146 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2147 iotlb |= io_mem_notdirty.ram_addr;
2148 else
2149 iotlb |= io_mem_rom.ram_addr;
2150 } else {
2151 /* IO handlers are currently passed a physical address.
2152 It would be nice to pass an offset from the base address
2153 of that region. This would avoid having to special case RAM,
2154 and avoid full address decoding in every device.
2155 We can't use the high bits of pd for this because
2156 ROM device (romd) pages still use these as a ram address. */
2157 iotlb = (pd & ~TARGET_PAGE_MASK);
2158 iotlb += p.region_offset;
2159 }
2160
2161 code_address = address;
2162 /* Make accesses to pages with watchpoints go via the
2163 watchpoint trap routines. */
2164 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2165 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2166 /* Avoid trapping reads of pages with a write breakpoint. */
2167 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2168 iotlb = io_mem_watch.ram_addr + paddr;
2169 address |= TLB_MMIO;
2170 break;
2171 }
2172 }
2173 }
2174
2175 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2176 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2177 te = &env->tlb_table[mmu_idx][index];
2178 te->addend = addend - vaddr;
2179 if (prot & PAGE_READ) {
2180 te->addr_read = address;
2181 } else {
2182 te->addr_read = -1;
2183 }
2184
2185 if (prot & PAGE_EXEC) {
2186 te->addr_code = code_address;
2187 } else {
2188 te->addr_code = -1;
2189 }
2190 if (prot & PAGE_WRITE) {
2191 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2192 /* Write access calls the I/O callback. */
2193 te->addr_write = address | TLB_MMIO;
2194 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2195 !cpu_physical_memory_is_dirty(pd)) {
2196 te->addr_write = address | TLB_NOTDIRTY;
2197 } else {
2198 te->addr_write = address;
2199 }
2200 } else {
2201 te->addr_write = -1;
2202 }
2203 }
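/* Illustrative sketch: tlb_set_page() is normally reached from a target's
   softmmu fault path (conventionally tlb_fill() in the per-target helper
   code) once the guest page tables have been walked.  The caller below is
   hypothetical; only the tlb_set_page() call itself reflects this file:

       static void handle_mmu_fault(CPUState *env1, target_ulong addr,
                                    int prot, int mmu_idx,
                                    target_ulong page_size)
       {
           target_phys_addr_t paddr = ...; // result of the page-table walk
           tlb_set_page(env1, addr & TARGET_PAGE_MASK,
                        paddr & TARGET_PAGE_MASK, prot, mmu_idx, page_size);
       }
*/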
2204
2205 #else
2206
2207 void tlb_flush(CPUState *env, int flush_global)
2208 {
2209 }
2210
2211 void tlb_flush_page(CPUState *env, target_ulong addr)
2212 {
2213 }
2214
2215 /*
2216 * Walks guest process memory "regions" one by one
2217 * and calls callback function 'fn' for each region.
2218 */
2219
2220 struct walk_memory_regions_data
2221 {
2222 walk_memory_regions_fn fn;
2223 void *priv;
2224 unsigned long start;
2225 int prot;
2226 };
2227
2228 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2229 abi_ulong end, int new_prot)
2230 {
2231 if (data->start != -1ul) {
2232 int rc = data->fn(data->priv, data->start, end, data->prot);
2233 if (rc != 0) {
2234 return rc;
2235 }
2236 }
2237
2238 data->start = (new_prot ? end : -1ul);
2239 data->prot = new_prot;
2240
2241 return 0;
2242 }
2243
2244 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2245 abi_ulong base, int level, void **lp)
2246 {
2247 abi_ulong pa;
2248 int i, rc;
2249
2250 if (*lp == NULL) {
2251 return walk_memory_regions_end(data, base, 0);
2252 }
2253
2254 if (level == 0) {
2255 PageDesc *pd = *lp;
2256 for (i = 0; i < L2_SIZE; ++i) {
2257 int prot = pd[i].flags;
2258
2259 pa = base | (i << TARGET_PAGE_BITS);
2260 if (prot != data->prot) {
2261 rc = walk_memory_regions_end(data, pa, prot);
2262 if (rc != 0) {
2263 return rc;
2264 }
2265 }
2266 }
2267 } else {
2268 void **pp = *lp;
2269 for (i = 0; i < L2_SIZE; ++i) {
2270 pa = base | ((abi_ulong)i <<
2271 (TARGET_PAGE_BITS + L2_BITS * level));
2272 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2273 if (rc != 0) {
2274 return rc;
2275 }
2276 }
2277 }
2278
2279 return 0;
2280 }
2281
2282 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2283 {
2284 struct walk_memory_regions_data data;
2285 unsigned long i;
2286
2287 data.fn = fn;
2288 data.priv = priv;
2289 data.start = -1ul;
2290 data.prot = 0;
2291
2292 for (i = 0; i < V_L1_SIZE; i++) {
2293 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2294 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2295 if (rc != 0) {
2296 return rc;
2297 }
2298 }
2299
2300 return walk_memory_regions_end(&data, 0, 0);
2301 }
2302
2303 static int dump_region(void *priv, abi_ulong start,
2304 abi_ulong end, unsigned long prot)
2305 {
2306 FILE *f = (FILE *)priv;
2307
2308 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2309 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2310 start, end, end - start,
2311 ((prot & PAGE_READ) ? 'r' : '-'),
2312 ((prot & PAGE_WRITE) ? 'w' : '-'),
2313 ((prot & PAGE_EXEC) ? 'x' : '-'));
2314
2315 return (0);
2316 }
2317
2318 /* dump memory mappings */
2319 void page_dump(FILE *f)
2320 {
2321 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2322 "start", "end", "size", "prot");
2323 walk_memory_regions(f, dump_region);
2324 }
2325
2326 int page_get_flags(target_ulong address)
2327 {
2328 PageDesc *p;
2329
2330 p = page_find(address >> TARGET_PAGE_BITS);
2331 if (!p)
2332 return 0;
2333 return p->flags;
2334 }
2335
2336 /* Modify the flags of a page and invalidate the code if necessary.
2337 The flag PAGE_WRITE_ORG is set automatically depending
2338 on PAGE_WRITE. The mmap_lock should already be held. */
2339 void page_set_flags(target_ulong start, target_ulong end, int flags)
2340 {
2341 target_ulong addr, len;
2342
2343 /* This function should never be called with addresses outside the
2344 guest address space. If this assert fires, it probably indicates
2345 a missing call to h2g_valid. */
2346 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2347 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2348 #endif
2349 assert(start < end);
2350
2351 start = start & TARGET_PAGE_MASK;
2352 end = TARGET_PAGE_ALIGN(end);
2353
2354 if (flags & PAGE_WRITE) {
2355 flags |= PAGE_WRITE_ORG;
2356 }
2357
2358 for (addr = start, len = end - start;
2359 len != 0;
2360 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2361 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362
2363 /* If the write protection bit is set, then we invalidate
2364 the code inside. */
2365 if (!(p->flags & PAGE_WRITE) &&
2366 (flags & PAGE_WRITE) &&
2367 p->first_tb) {
2368 tb_invalidate_phys_page(addr, 0, NULL);
2369 }
2370 p->flags = flags;
2371 }
2372 }
2373
2374 int page_check_range(target_ulong start, target_ulong len, int flags)
2375 {
2376 PageDesc *p;
2377 target_ulong end;
2378 target_ulong addr;
2379
2380 /* This function should never be called with addresses outside the
2381 guest address space. If this assert fires, it probably indicates
2382 a missing call to h2g_valid. */
2383 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2384 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2385 #endif
2386
2387 if (len == 0) {
2388 return 0;
2389 }
2390 if (start + len - 1 < start) {
2391 /* We've wrapped around. */
2392 return -1;
2393 }
2394
2395 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2396 start = start & TARGET_PAGE_MASK;
2397
2398 for (addr = start, len = end - start;
2399 len != 0;
2400 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2401 p = page_find(addr >> TARGET_PAGE_BITS);
2402 if (!p)
2403 return -1;
2404 if (!(p->flags & PAGE_VALID))
2405 return -1;
2406
2407 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2408 return -1;
2409 if (flags & PAGE_WRITE) {
2410 if (!(p->flags & PAGE_WRITE_ORG))
2411 return -1;
2412 /* unprotect the page if it was put read-only because it
2413 contains translated code */
2414 if (!(p->flags & PAGE_WRITE)) {
2415 if (!page_unprotect(addr, 0, NULL))
2416 return -1;
2417 }
2418 return 0;
2419 }
2420 }
2421 return 0;
2422 }
2423
2424 /* called from signal handler: invalidate the code and unprotect the
2425 page. Return TRUE if the fault was successfully handled. */
2426 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2427 {
2428 unsigned int prot;
2429 PageDesc *p;
2430 target_ulong host_start, host_end, addr;
2431
2432 /* Technically this isn't safe inside a signal handler. However we
2433 know this only ever happens in a synchronous SEGV handler, so in
2434 practice it seems to be ok. */
2435 mmap_lock();
2436
2437 p = page_find(address >> TARGET_PAGE_BITS);
2438 if (!p) {
2439 mmap_unlock();
2440 return 0;
2441 }
2442
2443 /* if the page was really writable, then we change its
2444 protection back to writable */
2445 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2446 host_start = address & qemu_host_page_mask;
2447 host_end = host_start + qemu_host_page_size;
2448
2449 prot = 0;
2450 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2451 p = page_find(addr >> TARGET_PAGE_BITS);
2452 p->flags |= PAGE_WRITE;
2453 prot |= p->flags;
2454
2455 /* and since the content will be modified, we must invalidate
2456 the corresponding translated code. */
2457 tb_invalidate_phys_page(addr, pc, puc);
2458 #ifdef DEBUG_TB_CHECK
2459 tb_invalidate_check(addr);
2460 #endif
2461 }
2462 mprotect((void *)g2h(host_start), qemu_host_page_size,
2463 prot & PAGE_BITS);
2464
2465 mmap_unlock();
2466 return 1;
2467 }
2468 mmap_unlock();
2469 return 0;
2470 }
2471
2472 static inline void tlb_set_dirty(CPUState *env,
2473 unsigned long addr, target_ulong vaddr)
2474 {
2475 }
2476 #endif /* defined(CONFIG_USER_ONLY) */
2477
2478 #if !defined(CONFIG_USER_ONLY)
2479
2480 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2481 typedef struct subpage_t {
2482 MemoryRegion iomem;
2483 target_phys_addr_t base;
2484 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2485 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2486 } subpage_t;
2487
2488 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2489 ram_addr_t memory, ram_addr_t region_offset);
2490 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2491 ram_addr_t orig_memory,
2492 ram_addr_t region_offset);
2493 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2494 need_subpage) \
2495 do { \
2496 if (addr > start_addr) \
2497 start_addr2 = 0; \
2498 else { \
2499 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2500 if (start_addr2 > 0) \
2501 need_subpage = 1; \
2502 } \
2503 \
2504 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2505 end_addr2 = TARGET_PAGE_SIZE - 1; \
2506 else { \
2507 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2508 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2509 need_subpage = 1; \
2510 } \
2511 } while (0)
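/* Worked example of the macro above, assuming 4K target pages: registering
   a 0x100-byte region at physical address 0x10000040 yields, for the page
   at 0x10000000, start_addr2 = 0x40 and end_addr2 = 0x13f with need_subpage
   set.  Only byte offsets 0x40-0x13f of that page are redirected to the new
   region through a subpage; the rest of the page keeps its previous
   mapping. */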
2512
2513 /* register physical memory.
2514 For RAM, 'size' must be a multiple of the target page size.
2515 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2516 io memory page. The address used when calling the IO function is
2517 the offset from the start of the region, plus region_offset. Both
2518 start_addr and region_offset are rounded down to a page boundary
2519 before calculating this offset. This should not be a problem unless
2520 the low bits of start_addr and region_offset differ. */
2521 void cpu_register_physical_memory_log(MemoryRegionSection *section,
2522 bool readable, bool readonly)
2523 {
2524 target_phys_addr_t start_addr = section->offset_within_address_space;
2525 ram_addr_t size = section->size;
2526 ram_addr_t phys_offset = section->mr->ram_addr;
2527 ram_addr_t region_offset = section->offset_within_region;
2528 target_phys_addr_t addr, end_addr;
2529 PhysPageDesc *p;
2530 CPUState *env;
2531 ram_addr_t orig_size = size;
2532 subpage_t *subpage;
2533
2534 if (memory_region_is_ram(section->mr)) {
2535 phys_offset += region_offset;
2536 region_offset = 0;
2537 }
2538
2539 if (readonly) {
2540 phys_offset |= io_mem_rom.ram_addr;
2541 }
2542
2543 assert(size);
2544
2545 if (phys_offset == io_mem_unassigned.ram_addr) {
2546 region_offset = start_addr;
2547 }
2548 region_offset &= TARGET_PAGE_MASK;
2549 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2550 end_addr = start_addr + (target_phys_addr_t)size;
2551
2552 addr = start_addr;
2553 do {
2554 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2555 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2556 ram_addr_t orig_memory = p->phys_offset;
2557 target_phys_addr_t start_addr2, end_addr2;
2558 int need_subpage = 0;
2559 MemoryRegion *mr = io_mem_region[(orig_memory & ~TARGET_PAGE_MASK)
2560 >> IO_MEM_SHIFT];
2561
2562 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2563 need_subpage);
2564 if (need_subpage) {
2565 if (!(mr->subpage)) {
2566 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2567 &p->phys_offset, orig_memory,
2568 p->region_offset);
2569 } else {
2570 subpage = container_of(mr, subpage_t, iomem);
2571 }
2572 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2573 region_offset);
2574 p->region_offset = 0;
2575 } else {
2576 p->phys_offset = phys_offset;
2577 p->region_offset = region_offset;
2578 if (is_ram_rom_romd(phys_offset))
2579 phys_offset += TARGET_PAGE_SIZE;
2580 }
2581 } else {
2582 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2583 p->phys_offset = phys_offset;
2584 p->region_offset = region_offset;
2585 if (is_ram_rom_romd(phys_offset)) {
2586 phys_offset += TARGET_PAGE_SIZE;
2587 } else {
2588 target_phys_addr_t start_addr2, end_addr2;
2589 int need_subpage = 0;
2590
2591 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2592 end_addr2, need_subpage);
2593
2594 if (need_subpage) {
2595 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2596 &p->phys_offset,
2597 io_mem_unassigned.ram_addr,
2598 addr & TARGET_PAGE_MASK);
2599 subpage_register(subpage, start_addr2, end_addr2,
2600 phys_offset, region_offset);
2601 p->region_offset = 0;
2602 }
2603 }
2604 }
2605 region_offset += TARGET_PAGE_SIZE;
2606 addr += TARGET_PAGE_SIZE;
2607 } while (addr != end_addr);
2608
2609 /* since each CPU stores ram addresses in its TLB cache, we must
2610 reset the modified entries */
2611 /* XXX: slow ! */
2612 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2613 tlb_flush(env, 1);
2614 }
2615 }
2616
2617 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2618 {
2619 if (kvm_enabled())
2620 kvm_coalesce_mmio_region(addr, size);
2621 }
2622
2623 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2624 {
2625 if (kvm_enabled())
2626 kvm_uncoalesce_mmio_region(addr, size);
2627 }
2628
2629 void qemu_flush_coalesced_mmio_buffer(void)
2630 {
2631 if (kvm_enabled())
2632 kvm_flush_coalesced_mmio_buffer();
2633 }
2634
2635 #if defined(__linux__) && !defined(TARGET_S390X)
2636
2637 #include <sys/vfs.h>
2638
2639 #define HUGETLBFS_MAGIC 0x958458f6
2640
2641 static long gethugepagesize(const char *path)
2642 {
2643 struct statfs fs;
2644 int ret;
2645
2646 do {
2647 ret = statfs(path, &fs);
2648 } while (ret != 0 && errno == EINTR);
2649
2650 if (ret != 0) {
2651 perror(path);
2652 return 0;
2653 }
2654
2655 if (fs.f_type != HUGETLBFS_MAGIC)
2656 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2657
2658 return fs.f_bsize;
2659 }
2660
2661 static void *file_ram_alloc(RAMBlock *block,
2662 ram_addr_t memory,
2663 const char *path)
2664 {
2665 char *filename;
2666 void *area;
2667 int fd;
2668 #ifdef MAP_POPULATE
2669 int flags;
2670 #endif
2671 unsigned long hpagesize;
2672
2673 hpagesize = gethugepagesize(path);
2674 if (!hpagesize) {
2675 return NULL;
2676 }
2677
2678 if (memory < hpagesize) {
2679 return NULL;
2680 }
2681
2682 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2683 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2684 return NULL;
2685 }
2686
2687 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2688 return NULL;
2689 }
2690
2691 fd = mkstemp(filename);
2692 if (fd < 0) {
2693 perror("unable to create backing store for hugepages");
2694 free(filename);
2695 return NULL;
2696 }
2697 unlink(filename);
2698 free(filename);
2699
2700 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2701
2702 /*
2703 * ftruncate is not supported by hugetlbfs in older
2704 * hosts, so don't bother bailing out on errors.
2705 * If anything goes wrong with it under other filesystems,
2706 * mmap will fail.
2707 */
2708 if (ftruncate(fd, memory))
2709 perror("ftruncate");
2710
2711 #ifdef MAP_POPULATE
2712 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2713 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2714 * to sidestep this quirk.
2715 */
2716 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2717 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2718 #else
2719 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2720 #endif
2721 if (area == MAP_FAILED) {
2722 perror("file_ram_alloc: can't mmap RAM pages");
2723 close(fd);
2724 return (NULL);
2725 }
2726 block->fd = fd;
2727 return area;
2728 }
2729 #endif
2730
2731 static ram_addr_t find_ram_offset(ram_addr_t size)
2732 {
2733 RAMBlock *block, *next_block;
2734 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2735
2736 if (QLIST_EMPTY(&ram_list.blocks))
2737 return 0;
2738
2739 QLIST_FOREACH(block, &ram_list.blocks, next) {
2740 ram_addr_t end, next = RAM_ADDR_MAX;
2741
2742 end = block->offset + block->length;
2743
2744 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2745 if (next_block->offset >= end) {
2746 next = MIN(next, next_block->offset);
2747 }
2748 }
2749 if (next - end >= size && next - end < mingap) {
2750 offset = end;
2751 mingap = next - end;
2752 }
2753 }
2754
2755 if (offset == RAM_ADDR_MAX) {
2756 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2757 (uint64_t)size);
2758 abort();
2759 }
2760
2761 return offset;
2762 }
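/* Worked example of the best-fit search above: with blocks at [0x0, 0x1000)
   and [0x3000, 0x4000), a request for 0x2000 bytes sees two candidate gaps,
   0x2000 bytes at offset 0x1000 and an effectively unbounded one at 0x4000.
   The smallest gap that still fits wins, so the new block lands at offset
   0x1000. */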
2763
2764 static ram_addr_t last_ram_offset(void)
2765 {
2766 RAMBlock *block;
2767 ram_addr_t last = 0;
2768
2769 QLIST_FOREACH(block, &ram_list.blocks, next)
2770 last = MAX(last, block->offset + block->length);
2771
2772 return last;
2773 }
2774
2775 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2776 {
2777 RAMBlock *new_block, *block;
2778
2779 new_block = NULL;
2780 QLIST_FOREACH(block, &ram_list.blocks, next) {
2781 if (block->offset == addr) {
2782 new_block = block;
2783 break;
2784 }
2785 }
2786 assert(new_block);
2787 assert(!new_block->idstr[0]);
2788
2789 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2790 char *id = dev->parent_bus->info->get_dev_path(dev);
2791 if (id) {
2792 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2793 g_free(id);
2794 }
2795 }
2796 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2797
2798 QLIST_FOREACH(block, &ram_list.blocks, next) {
2799 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2800 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2801 new_block->idstr);
2802 abort();
2803 }
2804 }
2805 }
2806
2807 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2808 MemoryRegion *mr)
2809 {
2810 RAMBlock *new_block;
2811
2812 size = TARGET_PAGE_ALIGN(size);
2813 new_block = g_malloc0(sizeof(*new_block));
2814
2815 new_block->mr = mr;
2816 new_block->offset = find_ram_offset(size);
2817 if (host) {
2818 new_block->host = host;
2819 new_block->flags |= RAM_PREALLOC_MASK;
2820 } else {
2821 if (mem_path) {
2822 #if defined (__linux__) && !defined(TARGET_S390X)
2823 new_block->host = file_ram_alloc(new_block, size, mem_path);
2824 if (!new_block->host) {
2825 new_block->host = qemu_vmalloc(size);
2826 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2827 }
2828 #else
2829 fprintf(stderr, "-mem-path option unsupported\n");
2830 exit(1);
2831 #endif
2832 } else {
2833 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2834 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2835 a system-defined value, which is at least 256GB. Larger systems
2836 have larger values. We put the guest between the end of the data
2837 segment (system break) and this value. We use 32GB as a base to
2838 have enough room for the system break to grow. */
2839 new_block->host = mmap((void*)0x800000000, size,
2840 PROT_EXEC|PROT_READ|PROT_WRITE,
2841 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2842 if (new_block->host == MAP_FAILED) {
2843 fprintf(stderr, "Allocating RAM failed\n");
2844 abort();
2845 }
2846 #else
2847 if (xen_enabled()) {
2848 xen_ram_alloc(new_block->offset, size, mr);
2849 } else {
2850 new_block->host = qemu_vmalloc(size);
2851 }
2852 #endif
2853 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2854 }
2855 }
2856 new_block->length = size;
2857
2858 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2859
2860 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2861 last_ram_offset() >> TARGET_PAGE_BITS);
2862 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2863 0xff, size >> TARGET_PAGE_BITS);
2864
2865 if (kvm_enabled())
2866 kvm_setup_guest_memory(new_block->host, size);
2867
2868 return new_block->offset;
2869 }
2870
2871 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2872 {
2873 return qemu_ram_alloc_from_ptr(size, NULL, mr);
2874 }
2875
2876 void qemu_ram_free_from_ptr(ram_addr_t addr)
2877 {
2878 RAMBlock *block;
2879
2880 QLIST_FOREACH(block, &ram_list.blocks, next) {
2881 if (addr == block->offset) {
2882 QLIST_REMOVE(block, next);
2883 g_free(block);
2884 return;
2885 }
2886 }
2887 }
2888
2889 void qemu_ram_free(ram_addr_t addr)
2890 {
2891 RAMBlock *block;
2892
2893 QLIST_FOREACH(block, &ram_list.blocks, next) {
2894 if (addr == block->offset) {
2895 QLIST_REMOVE(block, next);
2896 if (block->flags & RAM_PREALLOC_MASK) {
2897 ;
2898 } else if (mem_path) {
2899 #if defined (__linux__) && !defined(TARGET_S390X)
2900 if (block->fd) {
2901 munmap(block->host, block->length);
2902 close(block->fd);
2903 } else {
2904 qemu_vfree(block->host);
2905 }
2906 #else
2907 abort();
2908 #endif
2909 } else {
2910 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2911 munmap(block->host, block->length);
2912 #else
2913 if (xen_enabled()) {
2914 xen_invalidate_map_cache_entry(block->host);
2915 } else {
2916 qemu_vfree(block->host);
2917 }
2918 #endif
2919 }
2920 g_free(block);
2921 return;
2922 }
2923 }
2924
2925 }
2926
2927 #ifndef _WIN32
2928 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2929 {
2930 RAMBlock *block;
2931 ram_addr_t offset;
2932 int flags;
2933 void *area, *vaddr;
2934
2935 QLIST_FOREACH(block, &ram_list.blocks, next) {
2936 offset = addr - block->offset;
2937 if (offset < block->length) {
2938 vaddr = block->host + offset;
2939 if (block->flags & RAM_PREALLOC_MASK) {
2940 ;
2941 } else {
2942 flags = MAP_FIXED;
2943 munmap(vaddr, length);
2944 if (mem_path) {
2945 #if defined(__linux__) && !defined(TARGET_S390X)
2946 if (block->fd) {
2947 #ifdef MAP_POPULATE
2948 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2949 MAP_PRIVATE;
2950 #else
2951 flags |= MAP_PRIVATE;
2952 #endif
2953 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2954 flags, block->fd, offset);
2955 } else {
2956 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2957 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2958 flags, -1, 0);
2959 }
2960 #else
2961 abort();
2962 #endif
2963 } else {
2964 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2965 flags |= MAP_SHARED | MAP_ANONYMOUS;
2966 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2967 flags, -1, 0);
2968 #else
2969 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2970 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2971 flags, -1, 0);
2972 #endif
2973 }
2974 if (area != vaddr) {
2975 fprintf(stderr, "Could not remap addr: "
2976 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2977 length, addr);
2978 exit(1);
2979 }
2980 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2981 }
2982 return;
2983 }
2984 }
2985 }
2986 #endif /* !_WIN32 */
2987
2988 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2989 With the exception of the softmmu code in this file, this should
2990 only be used for local memory (e.g. video ram) that the device owns,
2991 and knows it isn't going to access beyond the end of the block.
2992
2993 It should not be used for general purpose DMA.
2994 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2995 */
2996 void *qemu_get_ram_ptr(ram_addr_t addr)
2997 {
2998 RAMBlock *block;
2999
3000 QLIST_FOREACH(block, &ram_list.blocks, next) {
3001 if (addr - block->offset < block->length) {
3002 /* Move this entry to the start of the list. */
3003 if (block != QLIST_FIRST(&ram_list.blocks)) {
3004 QLIST_REMOVE(block, next);
3005 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3006 }
3007 if (xen_enabled()) {
3008 /* We need to check if the requested address is in the RAM
3009 * because we don't want to map the entire memory in QEMU.
3010 * In that case just map until the end of the page.
3011 */
3012 if (block->offset == 0) {
3013 return xen_map_cache(addr, 0, 0);
3014 } else if (block->host == NULL) {
3015 block->host =
3016 xen_map_cache(block->offset, block->length, 1);
3017 }
3018 }
3019 return block->host + (addr - block->offset);
3020 }
3021 }
3022
3023 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3024 abort();
3025
3026 return NULL;
3027 }
3028
3029 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3030 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3031 */
3032 void *qemu_safe_ram_ptr(ram_addr_t addr)
3033 {
3034 RAMBlock *block;
3035
3036 QLIST_FOREACH(block, &ram_list.blocks, next) {
3037 if (addr - block->offset < block->length) {
3038 if (xen_enabled()) {
3039 /* We need to check if the requested address is in the RAM
3040 * because we don't want to map the entire memory in QEMU.
3041 * In that case just map until the end of the page.
3042 */
3043 if (block->offset == 0) {
3044 return xen_map_cache(addr, 0, 0);
3045 } else if (block->host == NULL) {
3046 block->host =
3047 xen_map_cache(block->offset, block->length, 1);
3048 }
3049 }
3050 return block->host + (addr - block->offset);
3051 }
3052 }
3053
3054 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3055 abort();
3056
3057 return NULL;
3058 }
3059
3060 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3061 * but takes a size argument */
3062 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3063 {
3064 if (*size == 0) {
3065 return NULL;
3066 }
3067 if (xen_enabled()) {
3068 return xen_map_cache(addr, *size, 1);
3069 } else {
3070 RAMBlock *block;
3071
3072 QLIST_FOREACH(block, &ram_list.blocks, next) {
3073 if (addr - block->offset < block->length) {
3074 if (addr - block->offset + *size > block->length)
3075 *size = block->length - addr + block->offset;
3076 return block->host + (addr - block->offset);
3077 }
3078 }
3079
3080 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3081 abort();
3082 }
3083 }
3084
3085 void qemu_put_ram_ptr(void *addr)
3086 {
3087 trace_qemu_put_ram_ptr(addr);
3088 }
3089
3090 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3091 {
3092 RAMBlock *block;
3093 uint8_t *host = ptr;
3094
3095 if (xen_enabled()) {
3096 *ram_addr = xen_ram_addr_from_mapcache(ptr);
3097 return 0;
3098 }
3099
3100 QLIST_FOREACH(block, &ram_list.blocks, next) {
3101 /* This case occurs when the block is not mapped. */
3102 if (block->host == NULL) {
3103 continue;
3104 }
3105 if (host - block->host < block->length) {
3106 *ram_addr = block->offset + (host - block->host);
3107 return 0;
3108 }
3109 }
3110
3111 return -1;
3112 }
3113
3114 /* Some of the softmmu routines need to translate from a host pointer
3115 (typically a TLB entry) back to a ram offset. */
3116 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3117 {
3118 ram_addr_t ram_addr;
3119
3120 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3121 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3122 abort();
3123 }
3124 return ram_addr;
3125 }
3126
3127 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3128 unsigned size)
3129 {
3130 #ifdef DEBUG_UNASSIGNED
3131 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3132 #endif
3133 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3134 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3135 #endif
3136 return 0;
3137 }
3138
3139 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3140 uint64_t val, unsigned size)
3141 {
3142 #ifdef DEBUG_UNASSIGNED
3143 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3144 #endif
3145 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3146 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3147 #endif
3148 }
3149
3150 static const MemoryRegionOps unassigned_mem_ops = {
3151 .read = unassigned_mem_read,
3152 .write = unassigned_mem_write,
3153 .endianness = DEVICE_NATIVE_ENDIAN,
3154 };
3155
3156 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3157 unsigned size)
3158 {
3159 abort();
3160 }
3161
3162 static void error_mem_write(void *opaque, target_phys_addr_t addr,
3163 uint64_t value, unsigned size)
3164 {
3165 abort();
3166 }
3167
3168 static const MemoryRegionOps error_mem_ops = {
3169 .read = error_mem_read,
3170 .write = error_mem_write,
3171 .endianness = DEVICE_NATIVE_ENDIAN,
3172 };
3173
3174 static const MemoryRegionOps rom_mem_ops = {
3175 .read = error_mem_read,
3176 .write = unassigned_mem_write,
3177 .endianness = DEVICE_NATIVE_ENDIAN,
3178 };
3179
3180 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3181 uint64_t val, unsigned size)
3182 {
3183 int dirty_flags;
3184 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3185 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3186 #if !defined(CONFIG_USER_ONLY)
3187 tb_invalidate_phys_page_fast(ram_addr, size);
3188 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3189 #endif
3190 }
3191 switch (size) {
3192 case 1:
3193 stb_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 2:
3196 stw_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 case 4:
3199 stl_p(qemu_get_ram_ptr(ram_addr), val);
3200 break;
3201 default:
3202 abort();
3203 }
3204 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3205 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3206 /* we remove the notdirty callback only if the code has been
3207 flushed */
3208 if (dirty_flags == 0xff)
3209 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3210 }
3211
3212 static const MemoryRegionOps notdirty_mem_ops = {
3213 .read = error_mem_read,
3214 .write = notdirty_mem_write,
3215 .endianness = DEVICE_NATIVE_ENDIAN,
3216 };
3217
3218 /* Generate a debug exception if a watchpoint has been hit. */
3219 static void check_watchpoint(int offset, int len_mask, int flags)
3220 {
3221 CPUState *env = cpu_single_env;
3222 target_ulong pc, cs_base;
3223 TranslationBlock *tb;
3224 target_ulong vaddr;
3225 CPUWatchpoint *wp;
3226 int cpu_flags;
3227
3228 if (env->watchpoint_hit) {
3229 /* We re-entered the check after replacing the TB. Now raise
3230 * the debug interrupt so that it will trigger after the
3231 * current instruction. */
3232 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3233 return;
3234 }
3235 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3236 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3237 if ((vaddr == (wp->vaddr & len_mask) ||
3238 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3239 wp->flags |= BP_WATCHPOINT_HIT;
3240 if (!env->watchpoint_hit) {
3241 env->watchpoint_hit = wp;
3242 tb = tb_find_pc(env->mem_io_pc);
3243 if (!tb) {
3244 cpu_abort(env, "check_watchpoint: could not find TB for "
3245 "pc=%p", (void *)env->mem_io_pc);
3246 }
3247 cpu_restore_state(tb, env, env->mem_io_pc);
3248 tb_phys_invalidate(tb, -1);
3249 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3250 env->exception_index = EXCP_DEBUG;
3251 } else {
3252 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3253 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3254 }
3255 cpu_resume_from_signal(env, NULL);
3256 }
3257 } else {
3258 wp->flags &= ~BP_WATCHPOINT_HIT;
3259 }
3260 }
3261 }
3262
3263 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3264 so these check for a hit then pass through to the normal out-of-line
3265 phys routines. */
3266 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3267 unsigned size)
3268 {
3269 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3270 switch (size) {
3271 case 1: return ldub_phys(addr);
3272 case 2: return lduw_phys(addr);
3273 case 4: return ldl_phys(addr);
3274 default: abort();
3275 }
3276 }
3277
3278 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3279 uint64_t val, unsigned size)
3280 {
3281 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3282 switch (size) {
3283 case 1: stb_phys(addr, val); break;
3284 case 2: stw_phys(addr, val); break;
3285 case 4: stl_phys(addr, val); break;
3286 default: abort();
3287 }
3288 }
3289
3290 static const MemoryRegionOps watch_mem_ops = {
3291 .read = watch_mem_read,
3292 .write = watch_mem_write,
3293 .endianness = DEVICE_NATIVE_ENDIAN,
3294 };
3295
3296 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3297 unsigned len)
3298 {
3299 subpage_t *mmio = opaque;
3300 unsigned int idx = SUBPAGE_IDX(addr);
3301 #if defined(DEBUG_SUBPAGE)
3302 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3303 mmio, len, addr, idx);
3304 #endif
3305
3306 addr += mmio->region_offset[idx];
3307 idx = mmio->sub_io_index[idx];
3308 return io_mem_read(idx, addr, len);
3309 }
3310
3311 static void subpage_write(void *opaque, target_phys_addr_t addr,
3312 uint64_t value, unsigned len)
3313 {
3314 subpage_t *mmio = opaque;
3315 unsigned int idx = SUBPAGE_IDX(addr);
3316 #if defined(DEBUG_SUBPAGE)
3317 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3318 " idx %d value %"PRIx64"\n",
3319 __func__, mmio, len, addr, idx, value);
3320 #endif
3321
3322 addr += mmio->region_offset[idx];
3323 idx = mmio->sub_io_index[idx];
3324 io_mem_write(idx, addr, value, len);
3325 }
3326
3327 static const MemoryRegionOps subpage_ops = {
3328 .read = subpage_read,
3329 .write = subpage_write,
3330 .endianness = DEVICE_NATIVE_ENDIAN,
3331 };
3332
3333 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3334 unsigned size)
3335 {
3336 ram_addr_t raddr = addr;
3337 void *ptr = qemu_get_ram_ptr(raddr);
3338 switch (size) {
3339 case 1: return ldub_p(ptr);
3340 case 2: return lduw_p(ptr);
3341 case 4: return ldl_p(ptr);
3342 default: abort();
3343 }
3344 }
3345
3346 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3347 uint64_t value, unsigned size)
3348 {
3349 ram_addr_t raddr = addr;
3350 void *ptr = qemu_get_ram_ptr(raddr);
3351 switch (size) {
3352 case 1: return stb_p(ptr, value);
3353 case 2: return stw_p(ptr, value);
3354 case 4: return stl_p(ptr, value);
3355 default: abort();
3356 }
3357 }
3358
3359 static const MemoryRegionOps subpage_ram_ops = {
3360 .read = subpage_ram_read,
3361 .write = subpage_ram_write,
3362 .endianness = DEVICE_NATIVE_ENDIAN,
3363 };
3364
3365 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3366 ram_addr_t memory, ram_addr_t region_offset)
3367 {
3368 int idx, eidx;
3369
3370 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3371 return -1;
3372 idx = SUBPAGE_IDX(start);
3373 eidx = SUBPAGE_IDX(end);
3374 #if defined(DEBUG_SUBPAGE)
3375 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3376 mmio, start, end, idx, eidx, memory);
3377 #endif
3378 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
3379 memory = io_mem_subpage_ram.ram_addr;
3380 }
3381 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3382 for (; idx <= eidx; idx++) {
3383 mmio->sub_io_index[idx] = memory;
3384 mmio->region_offset[idx] = region_offset;
3385 }
3386
3387 return 0;
3388 }
3389
3390 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3391 ram_addr_t orig_memory,
3392 ram_addr_t region_offset)
3393 {
3394 subpage_t *mmio;
3395 int subpage_memory;
3396
3397 mmio = g_malloc0(sizeof(subpage_t));
3398
3399 mmio->base = base;
3400 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3401 "subpage", TARGET_PAGE_SIZE);
3402 mmio->iomem.subpage = true;
3403 subpage_memory = mmio->iomem.ram_addr;
3404 #if defined(DEBUG_SUBPAGE)
3405 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3406 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3407 #endif
3408 *phys = subpage_memory;
3409 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3410
3411 return mmio;
3412 }
3413
3414 static int get_free_io_mem_idx(void)
3415 {
3416 int i;
3417
3418 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3419 if (!io_mem_used[i]) {
3420 io_mem_used[i] = 1;
3421 return i;
3422 }
3423 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3424 return -1;
3425 }
3426
3427 /* Register a MemoryRegion for use as an io zone. The region's
3428 MemoryRegionOps provide the byte, word and dword accessors that
3429 used to be passed in as function arrays.
3430 If io_index is non-zero, the corresponding io zone is
3431 modified. If it is zero, a new io zone is allocated. The return
3432 value can be used with cpu_register_physical_memory(). (-1) is
3433 returned on error. */
3434 static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
3435 {
3436 if (io_index <= 0) {
3437 io_index = get_free_io_mem_idx();
3438 if (io_index == -1)
3439 return io_index;
3440 } else {
3441 io_index >>= IO_MEM_SHIFT;
3442 if (io_index >= IO_MEM_NB_ENTRIES)
3443 return -1;
3444 }
3445
3446 io_mem_region[io_index] = mr;
3447
3448 return (io_index << IO_MEM_SHIFT);
3449 }
3450
3451 int cpu_register_io_memory(MemoryRegion *mr)
3452 {
3453 return cpu_register_io_memory_fixed(0, mr);
3454 }
3455
3456 void cpu_unregister_io_memory(int io_table_address)
3457 {
3458 int io_index = io_table_address >> IO_MEM_SHIFT;
3459
3460 io_mem_region[io_index] = NULL;
3461 io_mem_used[io_index] = 0;
3462 }
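/* Illustrative sketch: with the MemoryRegion API, device models no longer
   call cpu_register_io_memory() directly.  They describe their MMIO with a
   MemoryRegionOps table and let the memory core route accesses here.  The
   device names below are hypothetical:

       static const MemoryRegionOps mydev_ops = {
           .read = mydev_read,
           .write = mydev_write,
           .endianness = DEVICE_NATIVE_ENDIAN,
       };

       memory_region_init_io(&s->iomem, &mydev_ops, s, "mydev", 0x100);
       memory_region_add_subregion(get_system_memory(), base, &s->iomem);
*/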
3463
3464 static void io_mem_init(void)
3465 {
3466 int i;
3467
3468 /* Must be first: */
3469 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3470 assert(io_mem_ram.ram_addr == 0);
3471 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3472 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3473 "unassigned", UINT64_MAX);
3474 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3475 "notdirty", UINT64_MAX);
3476 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3477 "subpage-ram", UINT64_MAX);
3478 for (i=0; i<5; i++)
3479 io_mem_used[i] = 1;
3480
3481 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3482 "watch", UINT64_MAX);
3483 }
3484
3485 static void memory_map_init(void)
3486 {
3487 system_memory = g_malloc(sizeof(*system_memory));
3488 memory_region_init(system_memory, "system", INT64_MAX);
3489 set_system_memory_map(system_memory);
3490
3491 system_io = g_malloc(sizeof(*system_io));
3492 memory_region_init(system_io, "io", 65536);
3493 set_system_io_map(system_io);
3494 }
3495
3496 MemoryRegion *get_system_memory(void)
3497 {
3498 return system_memory;
3499 }
3500
3501 MemoryRegion *get_system_io(void)
3502 {
3503 return system_io;
3504 }
3505
3506 #endif /* !defined(CONFIG_USER_ONLY) */
3507
3508 /* physical memory access (slow version, mainly for debug) */
3509 #if defined(CONFIG_USER_ONLY)
3510 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3511 uint8_t *buf, int len, int is_write)
3512 {
3513 int l, flags;
3514 target_ulong page;
3515 void * p;
3516
3517 while (len > 0) {
3518 page = addr & TARGET_PAGE_MASK;
3519 l = (page + TARGET_PAGE_SIZE) - addr;
3520 if (l > len)
3521 l = len;
3522 flags = page_get_flags(page);
3523 if (!(flags & PAGE_VALID))
3524 return -1;
3525 if (is_write) {
3526 if (!(flags & PAGE_WRITE))
3527 return -1;
3528 /* XXX: this code should not depend on lock_user */
3529 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3530 return -1;
3531 memcpy(p, buf, l);
3532 unlock_user(p, addr, l);
3533 } else {
3534 if (!(flags & PAGE_READ))
3535 return -1;
3536 /* XXX: this code should not depend on lock_user */
3537 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3538 return -1;
3539 memcpy(buf, p, l);
3540 unlock_user(p, addr, 0);
3541 }
3542 len -= l;
3543 buf += l;
3544 addr += l;
3545 }
3546 return 0;
3547 }
3548
3549 #else
3550 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3551 int len, int is_write)
3552 {
3553 int l, io_index;
3554 uint8_t *ptr;
3555 uint32_t val;
3556 target_phys_addr_t page;
3557 ram_addr_t pd;
3558 PhysPageDesc p;
3559
3560 while (len > 0) {
3561 page = addr & TARGET_PAGE_MASK;
3562 l = (page + TARGET_PAGE_SIZE) - addr;
3563 if (l > len)
3564 l = len;
3565 p = phys_page_find(page >> TARGET_PAGE_BITS);
3566 pd = p.phys_offset;
3567
3568 if (is_write) {
3569 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3570 target_phys_addr_t addr1;
3571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3572 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3573 /* XXX: could force cpu_single_env to NULL to avoid
3574 potential bugs */
3575 if (l >= 4 && ((addr1 & 3) == 0)) {
3576 /* 32 bit write access */
3577 val = ldl_p(buf);
3578 io_mem_write(io_index, addr1, val, 4);
3579 l = 4;
3580 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3581 /* 16 bit write access */
3582 val = lduw_p(buf);
3583 io_mem_write(io_index, addr1, val, 2);
3584 l = 2;
3585 } else {
3586 /* 8 bit write access */
3587 val = ldub_p(buf);
3588 io_mem_write(io_index, addr1, val, 1);
3589 l = 1;
3590 }
3591 } else {
3592 ram_addr_t addr1;
3593 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3594 /* RAM case */
3595 ptr = qemu_get_ram_ptr(addr1);
3596 memcpy(ptr, buf, l);
3597 if (!cpu_physical_memory_is_dirty(addr1)) {
3598 /* invalidate code */
3599 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3600 /* set dirty bit */
3601 cpu_physical_memory_set_dirty_flags(
3602 addr1, (0xff & ~CODE_DIRTY_FLAG));
3603 }
3604 qemu_put_ram_ptr(ptr);
3605 }
3606 } else {
3607 if (!is_ram_rom_romd(pd)) {
3608 target_phys_addr_t addr1;
3609 /* I/O case */
3610 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3611 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3612 if (l >= 4 && ((addr1 & 3) == 0)) {
3613 /* 32 bit read access */
3614 val = io_mem_read(io_index, addr1, 4);
3615 stl_p(buf, val);
3616 l = 4;
3617 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3618 /* 16 bit read access */
3619 val = io_mem_read(io_index, addr1, 2);
3620 stw_p(buf, val);
3621 l = 2;
3622 } else {
3623 /* 8 bit read access */
3624 val = io_mem_read(io_index, addr1, 1);
3625 stb_p(buf, val);
3626 l = 1;
3627 }
3628 } else {
3629 /* RAM case */
3630 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3631 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3632 qemu_put_ram_ptr(ptr);
3633 }
3634 }
3635 len -= l;
3636 buf += l;
3637 addr += l;
3638 }
3639 }
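/* Illustrative sketch: device and monitor code usually goes through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around
   the function above, e.g. to fetch a descriptor from guest memory and
   write back a completion status (struct and field names hypothetical):

       struct mydesc d;
       cpu_physical_memory_read(desc_paddr, (uint8_t *)&d, sizeof(d));
       ... process the descriptor ...
       d.status = 1;
       cpu_physical_memory_write(desc_paddr, (uint8_t *)&d, sizeof(d));
*/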
3640
3641 /* used for ROM loading: can write in RAM and ROM */
3642 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3643 const uint8_t *buf, int len)
3644 {
3645 int l;
3646 uint8_t *ptr;
3647 target_phys_addr_t page;
3648 unsigned long pd;
3649 PhysPageDesc p;
3650
3651 while (len > 0) {
3652 page = addr & TARGET_PAGE_MASK;
3653 l = (page + TARGET_PAGE_SIZE) - addr;
3654 if (l > len)
3655 l = len;
3656 p = phys_page_find(page >> TARGET_PAGE_BITS);
3657 pd = p.phys_offset;
3658
3659 if (!is_ram_rom_romd(pd)) {
3660 /* do nothing */
3661 } else {
3662 unsigned long addr1;
3663 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3664 /* ROM/RAM case */
3665 ptr = qemu_get_ram_ptr(addr1);
3666 memcpy(ptr, buf, l);
3667 qemu_put_ram_ptr(ptr);
3668 }
3669 len -= l;
3670 buf += l;
3671 addr += l;
3672 }
3673 }
3674
3675 typedef struct {
3676 void *buffer;
3677 target_phys_addr_t addr;
3678 target_phys_addr_t len;
3679 } BounceBuffer;
3680
3681 static BounceBuffer bounce;
3682
3683 typedef struct MapClient {
3684 void *opaque;
3685 void (*callback)(void *opaque);
3686 QLIST_ENTRY(MapClient) link;
3687 } MapClient;
3688
3689 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3690 = QLIST_HEAD_INITIALIZER(map_client_list);
3691
3692 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3693 {
3694 MapClient *client = g_malloc(sizeof(*client));
3695
3696 client->opaque = opaque;
3697 client->callback = callback;
3698 QLIST_INSERT_HEAD(&map_client_list, client, link);
3699 return client;
3700 }
3701
3702 void cpu_unregister_map_client(void *_client)
3703 {
3704 MapClient *client = (MapClient *)_client;
3705
3706 QLIST_REMOVE(client, link);
3707 g_free(client);
3708 }
3709
3710 static void cpu_notify_map_clients(void)
3711 {
3712 MapClient *client;
3713
3714 while (!QLIST_EMPTY(&map_client_list)) {
3715 client = QLIST_FIRST(&map_client_list);
3716 client->callback(client->opaque);
3717 cpu_unregister_map_client(client);
3718 }
3719 }
3720
3721 /* Map a physical memory region into a host virtual address.
3722 * May map a subset of the requested range, given by and returned in *plen.
3723 * May return NULL if resources needed to perform the mapping are exhausted.
3724 * Use only for reads OR writes - not for read-modify-write operations.
3725 * Use cpu_register_map_client() to know when retrying the map operation is
3726 * likely to succeed.
3727 */
3728 void *cpu_physical_memory_map(target_phys_addr_t addr,
3729 target_phys_addr_t *plen,
3730 int is_write)
3731 {
3732 target_phys_addr_t len = *plen;
3733 target_phys_addr_t todo = 0;
3734 int l;
3735 target_phys_addr_t page;
3736 unsigned long pd;
3737 PhysPageDesc p;
3738 ram_addr_t raddr = RAM_ADDR_MAX;
3739 ram_addr_t rlen;
3740 void *ret;
3741
3742 while (len > 0) {
3743 page = addr & TARGET_PAGE_MASK;
3744 l = (page + TARGET_PAGE_SIZE) - addr;
3745 if (l > len)
3746 l = len;
3747 p = phys_page_find(page >> TARGET_PAGE_BITS);
3748 pd = p.phys_offset;
3749
3750 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
3751 if (todo || bounce.buffer) {
3752 break;
3753 }
3754 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3755 bounce.addr = addr;
3756 bounce.len = l;
3757 if (!is_write) {
3758 cpu_physical_memory_read(addr, bounce.buffer, l);
3759 }
3760
3761 *plen = l;
3762 return bounce.buffer;
3763 }
3764 if (!todo) {
3765 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3766 }
3767
3768 len -= l;
3769 addr += l;
3770 todo += l;
3771 }
3772 rlen = todo;
3773 ret = qemu_ram_ptr_length(raddr, &rlen);
3774 *plen = rlen;
3775 return ret;
3776 }
3777
3778 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3779 * Will also mark the memory as dirty if is_write == 1. access_len gives
3780 * the amount of memory that was actually read or written by the caller.
3781 */
3782 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3783 int is_write, target_phys_addr_t access_len)
3784 {
3785 if (buffer != bounce.buffer) {
3786 if (is_write) {
3787 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3788 while (access_len) {
3789 unsigned l;
3790 l = TARGET_PAGE_SIZE;
3791 if (l > access_len)
3792 l = access_len;
3793 if (!cpu_physical_memory_is_dirty(addr1)) {
3794 /* invalidate code */
3795 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3796 /* set dirty bit */
3797 cpu_physical_memory_set_dirty_flags(
3798 addr1, (0xff & ~CODE_DIRTY_FLAG));
3799 }
3800 addr1 += l;
3801 access_len -= l;
3802 }
3803 }
3804 if (xen_enabled()) {
3805 xen_invalidate_map_cache_entry(buffer);
3806 }
3807 return;
3808 }
3809 if (is_write) {
3810 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3811 }
3812 qemu_vfree(bounce.buffer);
3813 bounce.buffer = NULL;
3814 cpu_notify_map_clients();
3815 }
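/* Illustrative sketch: a DMA-capable device would use the map/unmap pair
   above for zero-copy transfers, falling back to cpu_register_map_client()
   when the single bounce buffer is busy (device names hypothetical):

       target_phys_addr_t plen = len;
       void *host = cpu_physical_memory_map(guest_paddr, &plen, 1);
       if (!host) {
           cpu_register_map_client(s, mydev_retry_dma);  // retry later
           return;
       }
       ... fill host[0..plen) with data; plen may be less than len ...
       cpu_physical_memory_unmap(host, plen, 1, plen);
*/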
3816
3817 /* warning: addr must be aligned */
3818 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3819 enum device_endian endian)
3820 {
3821 int io_index;
3822 uint8_t *ptr;
3823 uint32_t val;
3824 unsigned long pd;
3825 PhysPageDesc p;
3826
3827 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3828 pd = p.phys_offset;
3829
3830 if (!is_ram_rom_romd(pd)) {
3831 /* I/O case */
3832 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3833 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3834 val = io_mem_read(io_index, addr, 4);
3835 #if defined(TARGET_WORDS_BIGENDIAN)
3836 if (endian == DEVICE_LITTLE_ENDIAN) {
3837 val = bswap32(val);
3838 }
3839 #else
3840 if (endian == DEVICE_BIG_ENDIAN) {
3841 val = bswap32(val);
3842 }
3843 #endif
3844 } else {
3845 /* RAM case */
3846 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3847 (addr & ~TARGET_PAGE_MASK);
3848 switch (endian) {
3849 case DEVICE_LITTLE_ENDIAN:
3850 val = ldl_le_p(ptr);
3851 break;
3852 case DEVICE_BIG_ENDIAN:
3853 val = ldl_be_p(ptr);
3854 break;
3855 default:
3856 val = ldl_p(ptr);
3857 break;
3858 }
3859 }
3860 return val;
3861 }
3862
3863 uint32_t ldl_phys(target_phys_addr_t addr)
3864 {
3865 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3866 }
3867
3868 uint32_t ldl_le_phys(target_phys_addr_t addr)
3869 {
3870 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3871 }
3872
3873 uint32_t ldl_be_phys(target_phys_addr_t addr)
3874 {
3875 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3876 }
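/* Illustrative note: the _le/_be variants above let device models read
   guest structures with a fixed layout regardless of TARGET_WORDS_BIGENDIAN,
   e.g. for a descriptor defined as little-endian by a device spec
   (addresses hypothetical):

       uint32_t flags = ldl_le_phys(desc_paddr);
       uint32_t next  = ldl_le_phys(desc_paddr + 4);
*/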
3877
3878 /* warning: addr must be aligned */
3879 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3880 enum device_endian endian)
3881 {
3882 int io_index;
3883 uint8_t *ptr;
3884 uint64_t val;
3885 unsigned long pd;
3886 PhysPageDesc p;
3887
3888 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3889 pd = p.phys_offset;
3890
3891 if (!is_ram_rom_romd(pd)) {
3892 /* I/O case */
3893 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3894 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3895
3896 /* XXX This is broken when device endian != cpu endian.
3897 Fix and add "endian" variable check */
3898 #ifdef TARGET_WORDS_BIGENDIAN
3899 val = io_mem_read(io_index, addr, 4) << 32;
3900 val |= io_mem_read(io_index, addr + 4, 4);
3901 #else
3902 val = io_mem_read(io_index, addr, 4);
3903 val |= io_mem_read(io_index, addr + 4, 4) << 32;
3904 #endif
3905 } else {
3906 /* RAM case */
3907 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3908 (addr & ~TARGET_PAGE_MASK);
3909 switch (endian) {
3910 case DEVICE_LITTLE_ENDIAN:
3911 val = ldq_le_p(ptr);
3912 break;
3913 case DEVICE_BIG_ENDIAN:
3914 val = ldq_be_p(ptr);
3915 break;
3916 default:
3917 val = ldq_p(ptr);
3918 break;
3919 }
3920 }
3921 return val;
3922 }
3923
3924 uint64_t ldq_phys(target_phys_addr_t addr)
3925 {
3926 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3927 }
3928
3929 uint64_t ldq_le_phys(target_phys_addr_t addr)
3930 {
3931 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3932 }
3933
3934 uint64_t ldq_be_phys(target_phys_addr_t addr)
3935 {
3936 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3937 }
3938
3939 /* XXX: optimize */
3940 uint32_t ldub_phys(target_phys_addr_t addr)
3941 {
3942 uint8_t val;
3943 cpu_physical_memory_read(addr, &val, 1);
3944 return val;
3945 }
3946
3947 /* warning: addr must be aligned */
3948 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3949 enum device_endian endian)
3950 {
3951 int io_index;
3952 uint8_t *ptr;
3953 uint64_t val;
3954 unsigned long pd;
3955 PhysPageDesc p;
3956
3957 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3958 pd = p.phys_offset;
3959
3960 if (!is_ram_rom_romd(pd)) {
3961 /* I/O case */
3962 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3963 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
3964 val = io_mem_read(io_index, addr, 2);
3965 #if defined(TARGET_WORDS_BIGENDIAN)
3966 if (endian == DEVICE_LITTLE_ENDIAN) {
3967 val = bswap16(val);
3968 }
3969 #else
3970 if (endian == DEVICE_BIG_ENDIAN) {
3971 val = bswap16(val);
3972 }
3973 #endif
3974 } else {
3975 /* RAM case */
3976 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3977 (addr & ~TARGET_PAGE_MASK);
3978 switch (endian) {
3979 case DEVICE_LITTLE_ENDIAN:
3980 val = lduw_le_p(ptr);
3981 break;
3982 case DEVICE_BIG_ENDIAN:
3983 val = lduw_be_p(ptr);
3984 break;
3985 default:
3986 val = lduw_p(ptr);
3987 break;
3988 }
3989 }
3990 return val;
3991 }
3992
3993 uint32_t lduw_phys(target_phys_addr_t addr)
3994 {
3995 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3996 }
3997
3998 uint32_t lduw_le_phys(target_phys_addr_t addr)
3999 {
4000 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4001 }
4002
4003 uint32_t lduw_be_phys(target_phys_addr_t addr)
4004 {
4005 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4006 }
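
/* Illustrative sketch (not part of the original file): the lduw/ldl/ldq
   helpers above require an aligned guest physical address.  For a possibly
   unaligned address, a caller can instead go through
   cpu_physical_memory_read() and decode the bytes explicitly, e.g.: */
#if 0
static uint32_t example_ldl_le_unaligned(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_read(addr, buf, sizeof(buf));
    /* Assemble a little-endian 32-bit value from the raw bytes. */
    return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
           ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
}
#endif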
4007
4008 /* warning: addr must be aligned. The RAM page is not marked as dirty
4009 and the code inside is not invalidated. This is useful when the dirty
4010 bits are used to track modified PTEs. */
4011 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4012 {
4013 int io_index;
4014 uint8_t *ptr;
4015 unsigned long pd;
4016 PhysPageDesc p;
4017
4018 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4019 pd = p.phys_offset;
4020
4021 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4022 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4023 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4024 io_mem_write(io_index, addr, val, 4);
4025 } else {
4026 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4027 ptr = qemu_get_ram_ptr(addr1);
4028 stl_p(ptr, val);
4029
4030 if (unlikely(in_migration)) {
4031 if (!cpu_physical_memory_is_dirty(addr1)) {
4032 /* invalidate code */
4033 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4034 /* set dirty bit */
4035 cpu_physical_memory_set_dirty_flags(
4036 addr1, (0xff & ~CODE_DIRTY_FLAG));
4037 }
4038 }
4039 }
4040 }
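
/* Illustrative sketch (not part of the original file): the typical use of
   stl_phys_notdirty() is a softmmu page-table walker updating the
   accessed/dirty bits of a guest PTE.  The write must not trigger TB
   invalidation and, as the comment above notes, must not disturb the dirty
   tracking that the target uses to detect modified PTEs.  The PTE layout
   below (bit 5 = accessed, bit 6 = dirty) is only an example, loosely
   modelled on x86. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= (1 << 5);                    /* accessed */
    if (is_write) {
        pte |= (1 << 6);                /* dirty */
    }
    stl_phys_notdirty(pte_addr, pte);   /* no code invalidation, no dirty flag */
}
#endif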
4041
4042 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4043 {
4044 int io_index;
4045 uint8_t *ptr;
4046 unsigned long pd;
4047 PhysPageDesc p;
4048
4049 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4050 pd = p.phys_offset;
4051
4052 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4053 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4054 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4055 #ifdef TARGET_WORDS_BIGENDIAN
4056 io_mem_write(io_index, addr, val >> 32, 4);
4057 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
4058 #else
4059 io_mem_write(io_index, addr, (uint32_t)val, 4);
4060 io_mem_write(io_index, addr + 4, val >> 32, 4);
4061 #endif
4062 } else {
4063 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4064 (addr & ~TARGET_PAGE_MASK);
4065 stq_p(ptr, val);
4066 }
4067 }
4068
4069 /* warning: addr must be aligned */
4070 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4071 enum device_endian endian)
4072 {
4073 int io_index;
4074 uint8_t *ptr;
4075 unsigned long pd;
4076 PhysPageDesc p;
4077
4078 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4079 pd = p.phys_offset;
4080
4081 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4082 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4083 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4084 #if defined(TARGET_WORDS_BIGENDIAN)
4085 if (endian == DEVICE_LITTLE_ENDIAN) {
4086 val = bswap32(val);
4087 }
4088 #else
4089 if (endian == DEVICE_BIG_ENDIAN) {
4090 val = bswap32(val);
4091 }
4092 #endif
4093 io_mem_write(io_index, addr, val, 4);
4094 } else {
4095 unsigned long addr1;
4096 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4097 /* RAM case */
4098 ptr = qemu_get_ram_ptr(addr1);
4099 switch (endian) {
4100 case DEVICE_LITTLE_ENDIAN:
4101 stl_le_p(ptr, val);
4102 break;
4103 case DEVICE_BIG_ENDIAN:
4104 stl_be_p(ptr, val);
4105 break;
4106 default:
4107 stl_p(ptr, val);
4108 break;
4109 }
4110 if (!cpu_physical_memory_is_dirty(addr1)) {
4111 /* invalidate code */
4112 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4113 /* set dirty bit */
4114 cpu_physical_memory_set_dirty_flags(addr1,
4115 (0xff & ~CODE_DIRTY_FLAG));
4116 }
4117 }
4118 }
4119
4120 void stl_phys(target_phys_addr_t addr, uint32_t val)
4121 {
4122 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4123 }
4124
4125 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4126 {
4127 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4128 }
4129
4130 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4131 {
4132 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4133 }
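
/* Illustrative sketch (not part of the original file): the explicit-endian
   store wrappers above let a device model commit a value with a fixed byte
   order regardless of TARGET_WORDS_BIGENDIAN, instead of open-coding a
   bswap.  The address and value are made up. */
#if 0
static void example_post_status(target_phys_addr_t status_addr)
{
    /* The guest-visible status word is defined as little-endian. */
    stl_le_phys(status_addr, 0x1);
}
#endif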
4134
4135 /* XXX: optimize */
4136 void stb_phys(target_phys_addr_t addr, uint32_t val)
4137 {
4138 uint8_t v = val;
4139 cpu_physical_memory_write(addr, &v, 1);
4140 }
4141
4142 /* warning: addr must be aligned */
4143 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4144 enum device_endian endian)
4145 {
4146 int io_index;
4147 uint8_t *ptr;
4148 unsigned long pd;
4149 PhysPageDesc p;
4150
4151 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4152 pd = p.phys_offset;
4153
4154 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
4155 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4156 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
4157 #if defined(TARGET_WORDS_BIGENDIAN)
4158 if (endian == DEVICE_LITTLE_ENDIAN) {
4159 val = bswap16(val);
4160 }
4161 #else
4162 if (endian == DEVICE_BIG_ENDIAN) {
4163 val = bswap16(val);
4164 }
4165 #endif
4166 io_mem_write(io_index, addr, val, 2);
4167 } else {
4168 unsigned long addr1;
4169 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4170 /* RAM case */
4171 ptr = qemu_get_ram_ptr(addr1);
4172 switch (endian) {
4173 case DEVICE_LITTLE_ENDIAN:
4174 stw_le_p(ptr, val);
4175 break;
4176 case DEVICE_BIG_ENDIAN:
4177 stw_be_p(ptr, val);
4178 break;
4179 default:
4180 stw_p(ptr, val);
4181 break;
4182 }
4183 if (!cpu_physical_memory_is_dirty(addr1)) {
4184 /* invalidate code */
4185 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4186 /* set dirty bit */
4187 cpu_physical_memory_set_dirty_flags(addr1,
4188 (0xff & ~CODE_DIRTY_FLAG));
4189 }
4190 }
4191 }
4192
4193 void stw_phys(target_phys_addr_t addr, uint32_t val)
4194 {
4195 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4196 }
4197
4198 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4199 {
4200 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4201 }
4202
4203 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4204 {
4205 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4206 }
4207
4208 /* XXX: optimize */
4209 void stq_phys(target_phys_addr_t addr, uint64_t val)
4210 {
4211 val = tswap64(val);
4212 cpu_physical_memory_write(addr, &val, 8);
4213 }
4214
4215 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4216 {
4217 val = cpu_to_le64(val);
4218 cpu_physical_memory_write(addr, &val, 8);
4219 }
4220
4221 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4222 {
4223 val = cpu_to_be64(val);
4224 cpu_physical_memory_write(addr, &val, 8);
4225 }
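
/* Illustrative sketch (not part of the original file): writing a small
   guest-visible structure whose fields are defined as little-endian, using
   the wrappers above.  The layout (8-byte address, 4-byte length, 2-byte
   flags) is invented for the example. */
#if 0
static void example_write_le_descriptor(target_phys_addr_t desc,
                                        uint64_t buf_addr, uint32_t len)
{
    stq_le_phys(desc + 0, buf_addr);   /* 64-bit field, little-endian */
    stl_le_phys(desc + 8, len);        /* 32-bit field, little-endian */
    stw_le_phys(desc + 12, 1);         /* 16-bit flags, little-endian */
}
#endif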
4226
4227 /* virtual memory access for debug (includes writing to ROM) */
4228 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4229 uint8_t *buf, int len, int is_write)
4230 {
4231 int l;
4232 target_phys_addr_t phys_addr;
4233 target_ulong page;
4234
4235 while (len > 0) {
4236 page = addr & TARGET_PAGE_MASK;
4237 phys_addr = cpu_get_phys_page_debug(env, page);
4238 /* if no physical page mapped, return an error */
4239 if (phys_addr == -1)
4240 return -1;
4241 l = (page + TARGET_PAGE_SIZE) - addr;
4242 if (l > len)
4243 l = len;
4244 phys_addr += (addr & ~TARGET_PAGE_MASK);
4245 if (is_write)
4246 cpu_physical_memory_write_rom(phys_addr, buf, l);
4247 else
4248 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4249 len -= l;
4250 buf += l;
4251 addr += l;
4252 }
4253 return 0;
4254 }
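
/* Illustrative sketch (not part of the original file): a debugger-style
   read of guest *virtual* memory through cpu_memory_rw_debug(), as a gdb
   stub would do.  Returns 0 on success, -1 if some page is unmapped. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif
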
4255 #endif
4256
4257 /* In deterministic execution mode, instructions that perform device I/O
4258 must be at the end of the TB. */
4259 void cpu_io_recompile(CPUState *env, void *retaddr)
4260 {
4261 TranslationBlock *tb;
4262 uint32_t n, cflags;
4263 target_ulong pc, cs_base;
4264 uint64_t flags;
4265
4266 tb = tb_find_pc((unsigned long)retaddr);
4267 if (!tb) {
4268 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4269 retaddr);
4270 }
4271 n = env->icount_decr.u16.low + tb->icount;
4272 cpu_restore_state(tb, env, (unsigned long)retaddr);
4273 /* Calculate how many instructions had been executed before the fault
4274 occurred. */
4275 n = n - env->icount_decr.u16.low;
4276 /* Generate a new TB ending on the I/O insn. */
4277 n++;
4278 /* On MIPS and SH, delay slot instructions can only be restarted if
4279 they were already the first instruction in the TB. If this is not
4280 the first instruction in a TB then re-execute the preceding
4281 branch. */
4282 #if defined(TARGET_MIPS)
4283 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4284 env->active_tc.PC -= 4;
4285 env->icount_decr.u16.low++;
4286 env->hflags &= ~MIPS_HFLAG_BMASK;
4287 }
4288 #elif defined(TARGET_SH4)
4289 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4290 && n > 1) {
4291 env->pc -= 2;
4292 env->icount_decr.u16.low++;
4293 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4294 }
4295 #endif
4296 /* This should never happen. */
4297 if (n > CF_COUNT_MASK)
4298 cpu_abort(env, "TB too big during recompile");
4299
4300 cflags = n | CF_LAST_IO;
4301 pc = tb->pc;
4302 cs_base = tb->cs_base;
4303 flags = tb->flags;
4304 tb_phys_invalidate(tb, -1);
4305 /* FIXME: In theory this could raise an exception. In practice
4306 we have already translated the block once so it's probably ok. */
4307 tb_gen_code(env, pc, cs_base, flags, cflags);
4308 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4309 the first in the TB) then we end up generating a whole new TB and
4310 repeating the fault, which is horribly inefficient.
4311 Better would be to execute just this insn uncached, or generate a
4312 second new TB. */
4313 cpu_resume_from_signal(env, NULL);
4314 }
4315
4316 #if !defined(CONFIG_USER_ONLY)
4317
4318 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4319 {
4320 int i, target_code_size, max_target_code_size;
4321 int direct_jmp_count, direct_jmp2_count, cross_page;
4322 TranslationBlock *tb;
4323
4324 target_code_size = 0;
4325 max_target_code_size = 0;
4326 cross_page = 0;
4327 direct_jmp_count = 0;
4328 direct_jmp2_count = 0;
4329 for(i = 0; i < nb_tbs; i++) {
4330 tb = &tbs[i];
4331 target_code_size += tb->size;
4332 if (tb->size > max_target_code_size)
4333 max_target_code_size = tb->size;
4334 if (tb->page_addr[1] != -1)
4335 cross_page++;
4336 if (tb->tb_next_offset[0] != 0xffff) {
4337 direct_jmp_count++;
4338 if (tb->tb_next_offset[1] != 0xffff) {
4339 direct_jmp2_count++;
4340 }
4341 }
4342 }
4343 /* XXX: avoid using doubles? */
4344 cpu_fprintf(f, "Translation buffer state:\n");
4345 cpu_fprintf(f, "gen code size %td/%ld\n",
4346 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4347 cpu_fprintf(f, "TB count %d/%d\n",
4348 nb_tbs, code_gen_max_blocks);
4349 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4350 nb_tbs ? target_code_size / nb_tbs : 0,
4351 max_target_code_size);
4352 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4353 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4354 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4355 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4356 cross_page,
4357 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4358 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4359 direct_jmp_count,
4360 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4361 direct_jmp2_count,
4362 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4363 cpu_fprintf(f, "\nStatistics:\n");
4364 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4365 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4366 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4367 tcg_dump_info(f, cpu_fprintf);
4368 }
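
/* Note (not part of the original file): dump_exec_info() is not called from
   within exec.c itself; it is typically reached from the monitor (e.g. the
   "info jit" command), which passes its fprintf-like callback as
   cpu_fprintf. */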
4369
4370 /* NOTE: this function can trigger an exception */
4371 /* NOTE2: the returned address is not exactly the physical address: it
4372 is an offset into guest RAM (a ram_addr_t), not a host pointer */
4373 tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4374 {
4375 int mmu_idx, page_index, pd;
4376 void *p;
4377
4378 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4379 mmu_idx = cpu_mmu_index(env1);
4380 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4381 (addr & TARGET_PAGE_MASK))) {
4382 ldub_code(addr);
4383 }
4384 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
4385 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4386 && !is_romd(pd)) {
4387 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4388 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4389 #else
4390 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4391 #endif
4392 }
4393 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4394 return qemu_ram_addr_from_host_nofail(p);
4395 }
4396
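/* The includes below instantiate the code-fetch ("_cmmu") variants of the
   softmmu load helpers.  Each inclusion of softmmu_template.h generates the
   accessor for one access size, selected by SHIFT (the size is
   1 << SHIFT bytes, so SHIFT 0/1/2/3 gives the 1-, 2-, 4- and 8-byte
   versions).  GETPC() is stubbed out to NULL because these helpers are not
   called from generated code, so there is no return address from which to
   restore CPU state. */
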
4397 #define MMUSUFFIX _cmmu
4398 #undef GETPC
4399 #define GETPC() NULL
4400 #define env cpu_single_env
4401 #define SOFTMMU_CODE_ACCESS
4402
4403 #define SHIFT 0
4404 #include "softmmu_template.h"
4405
4406 #define SHIFT 1
4407 #include "softmmu_template.h"
4408
4409 #define SHIFT 2
4410 #include "softmmu_template.h"
4411
4412 #define SHIFT 3
4413 #include "softmmu_template.h"
4414
4415 #undef env
4416
4417 #endif