[qemu.git] / exec.c
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
45 #include "trace.h"
46 #endif
47 #include "exec/cpu-all.h"
48
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
51
52 #include "exec/memory-internal.h"
53
54 //#define DEBUG_SUBPAGE
55
56 #if !defined(CONFIG_USER_ONLY)
57 static int in_migration;
58
59 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
60
61 static MemoryRegion *system_memory;
62 static MemoryRegion *system_io;
63
64 AddressSpace address_space_io;
65 AddressSpace address_space_memory;
66
67 MemoryRegion io_mem_rom, io_mem_notdirty;
68 static MemoryRegion io_mem_unassigned;
69
70 #endif
71
72 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
73 /* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
75 DEFINE_TLS(CPUState *, current_cpu);
76 /* 0 = Do not count executed instructions.
77 1 = Precise instruction counting.
78 2 = Adaptive rate instruction counting. */
79 int use_icount;
80
81 #if !defined(CONFIG_USER_ONLY)
82
83 typedef struct PhysPageEntry PhysPageEntry;
84
85 struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89 };
90
91 typedef PhysPageEntry Node[L2_SIZE];
92
93 struct AddressSpaceDispatch {
94 /* This is a multi-level map on the physical address space.
95 * The bottom level has pointers to MemoryRegionSections.
96 */
97 PhysPageEntry phys_map;
98 Node *nodes;
99 MemoryRegionSection *sections;
100 AddressSpace *as;
101 };
102
103 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
104 typedef struct subpage_t {
105 MemoryRegion iomem;
106 AddressSpace *as;
107 hwaddr base;
108 uint16_t sub_section[TARGET_PAGE_SIZE];
109 } subpage_t;
110
111 #define PHYS_SECTION_UNASSIGNED 0
112 #define PHYS_SECTION_NOTDIRTY 1
113 #define PHYS_SECTION_ROM 2
114 #define PHYS_SECTION_WATCH 3
115
116 typedef struct PhysPageMap {
117 unsigned sections_nb;
118 unsigned sections_nb_alloc;
119 unsigned nodes_nb;
120 unsigned nodes_nb_alloc;
121 Node *nodes;
122 MemoryRegionSection *sections;
123 } PhysPageMap;
124
125 static PhysPageMap *prev_map;
126 static PhysPageMap next_map;
127
128 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
129
130 static void io_mem_init(void);
131 static void memory_map_init(void);
132
133 static MemoryRegion io_mem_watch;
134 #endif
135
136 #if !defined(CONFIG_USER_ONLY)
137
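/* Make sure next_map has room for at least 'nodes' more radix-tree nodes,
   doubling the allocation (minimum 16) as needed. */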
138 static void phys_map_node_reserve(unsigned nodes)
139 {
140 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
141 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
142 16);
143 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
144 next_map.nodes_nb + nodes);
145 next_map.nodes = g_renew(Node, next_map.nodes,
146 next_map.nodes_nb_alloc);
147 }
148 }
149
150 static uint16_t phys_map_node_alloc(void)
151 {
152 unsigned i;
153 uint16_t ret;
154
155 ret = next_map.nodes_nb++;
156 assert(ret != PHYS_MAP_NODE_NIL);
157 assert(ret != next_map.nodes_nb_alloc);
158 for (i = 0; i < L2_SIZE; ++i) {
159 next_map.nodes[ret][i].is_leaf = 0;
160 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
161 }
162 return ret;
163 }
164
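/* Descend the radix tree one level per call: ranges aligned to and at least
   as large as this level's 'step' become leaves pointing at 'leaf'; smaller
   or unaligned ranges recurse into the next level down. */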
165 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
166 hwaddr *nb, uint16_t leaf,
167 int level)
168 {
169 PhysPageEntry *p;
170 int i;
171 hwaddr step = (hwaddr)1 << (level * L2_BITS);
172
173 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
174 lp->ptr = phys_map_node_alloc();
175 p = next_map.nodes[lp->ptr];
176 if (level == 0) {
177 for (i = 0; i < L2_SIZE; i++) {
178 p[i].is_leaf = 1;
179 p[i].ptr = PHYS_SECTION_UNASSIGNED;
180 }
181 }
182 } else {
183 p = next_map.nodes[lp->ptr];
184 }
185 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
186
187 while (*nb && lp < &p[L2_SIZE]) {
188 if ((*index & (step - 1)) == 0 && *nb >= step) {
189 lp->is_leaf = true;
190 lp->ptr = leaf;
191 *index += step;
192 *nb -= step;
193 } else {
194 phys_page_set_level(lp, index, nb, leaf, level - 1);
195 }
196 ++lp;
197 }
198 }
199
200 static void phys_page_set(AddressSpaceDispatch *d,
201 hwaddr index, hwaddr nb,
202 uint16_t leaf)
203 {
204 /* Wildly overreserve - it doesn't matter much. */
205 phys_map_node_reserve(3 * P_L2_LEVELS);
206
207 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
208 }
209
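/* Walk the radix tree from 'lp' down to a leaf for the given page index and
   return the matching MemoryRegionSection, or the unassigned section if the
   path hits a nil pointer. */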
210 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
211 Node *nodes, MemoryRegionSection *sections)
212 {
213 PhysPageEntry *p;
214 int i;
215
216 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
217 if (lp.ptr == PHYS_MAP_NODE_NIL) {
218 return &sections[PHYS_SECTION_UNASSIGNED];
219 }
220 p = nodes[lp.ptr];
221 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
222 }
223 return &sections[lp.ptr];
224 }
225
226 bool memory_region_is_unassigned(MemoryRegion *mr)
227 {
228 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
229 && mr != &io_mem_watch;
230 }
231
232 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
233 hwaddr addr,
234 bool resolve_subpage)
235 {
236 MemoryRegionSection *section;
237 subpage_t *subpage;
238
239 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
240 d->nodes, d->sections);
241 if (resolve_subpage && section->mr->subpage) {
242 subpage = container_of(section->mr, subpage_t, iomem);
243 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
244 }
245 return section;
246 }
247
248 static MemoryRegionSection *
249 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
250 hwaddr *plen, bool resolve_subpage)
251 {
252 MemoryRegionSection *section;
253 Int128 diff;
254
255 section = address_space_lookup_region(d, addr, resolve_subpage);
256 /* Compute offset within MemoryRegionSection */
257 addr -= section->offset_within_address_space;
258
259 /* Compute offset within MemoryRegion */
260 *xlat = addr + section->offset_within_region;
261
262 diff = int128_sub(section->mr->size, int128_make64(addr));
263 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
264 return section;
265 }
266
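/* Translate 'addr' in 'as' to a terminal MemoryRegion, following any IOMMUs
   on the way.  The offset within the returned region is stored in *xlat, and
   *plen may be shrunk so the access stays within one translation.  An access
   the IOMMU forbids returns io_mem_unassigned. */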
267 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
268 hwaddr *xlat, hwaddr *plen,
269 bool is_write)
270 {
271 IOMMUTLBEntry iotlb;
272 MemoryRegionSection *section;
273 MemoryRegion *mr;
274 hwaddr len = *plen;
275
276 for (;;) {
277 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
278 mr = section->mr;
279
280 if (!mr->iommu_ops) {
281 break;
282 }
283
284 iotlb = mr->iommu_ops->translate(mr, addr);
285 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
286 | (addr & iotlb.addr_mask));
287 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
288 if (!(iotlb.perm & (1 << is_write))) {
289 mr = &io_mem_unassigned;
290 break;
291 }
292
293 as = iotlb.target_as;
294 }
295
296 *plen = len;
297 *xlat = addr;
298 return mr;
299 }
300
301 MemoryRegionSection *
302 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
303 hwaddr *plen)
304 {
305 MemoryRegionSection *section;
306 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
307
308 assert(!section->mr->iommu_ops);
309 return section;
310 }
311 #endif
312
313 void cpu_exec_init_all(void)
314 {
315 #if !defined(CONFIG_USER_ONLY)
316 qemu_mutex_init(&ram_list.mutex);
317 memory_map_init();
318 io_mem_init();
319 #endif
320 }
321
322 #if !defined(CONFIG_USER_ONLY)
323
324 static int cpu_common_post_load(void *opaque, int version_id)
325 {
326 CPUState *cpu = opaque;
327
328 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
329 version_id is increased. */
330 cpu->interrupt_request &= ~0x01;
331 tlb_flush(cpu->env_ptr, 1);
332
333 return 0;
334 }
335
336 const VMStateDescription vmstate_cpu_common = {
337 .name = "cpu_common",
338 .version_id = 1,
339 .minimum_version_id = 1,
340 .minimum_version_id_old = 1,
341 .post_load = cpu_common_post_load,
342 .fields = (VMStateField []) {
343 VMSTATE_UINT32(halted, CPUState),
344 VMSTATE_UINT32(interrupt_request, CPUState),
345 VMSTATE_END_OF_LIST()
346 }
347 };
348
349 #endif
350
351 CPUState *qemu_get_cpu(int index)
352 {
353 CPUState *cpu;
354
355 CPU_FOREACH(cpu) {
356 if (cpu->cpu_index == index) {
357 return cpu;
358 }
359 }
360
361 return NULL;
362 }
363
364 void cpu_exec_init(CPUArchState *env)
365 {
366 CPUState *cpu = ENV_GET_CPU(env);
367 CPUClass *cc = CPU_GET_CLASS(cpu);
368 CPUState *some_cpu;
369 int cpu_index;
370
371 #if defined(CONFIG_USER_ONLY)
372 cpu_list_lock();
373 #endif
374 cpu_index = 0;
375 CPU_FOREACH(some_cpu) {
376 cpu_index++;
377 }
378 cpu->cpu_index = cpu_index;
379 cpu->numa_node = 0;
380 QTAILQ_INIT(&env->breakpoints);
381 QTAILQ_INIT(&env->watchpoints);
382 #ifndef CONFIG_USER_ONLY
383 cpu->thread_id = qemu_get_thread_id();
384 #endif
385 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
386 #if defined(CONFIG_USER_ONLY)
387 cpu_list_unlock();
388 #endif
389 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
390 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
391 }
392 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
393 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
394 cpu_save, cpu_load, env);
395 assert(cc->vmsd == NULL);
396 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
397 #endif
398 if (cc->vmsd != NULL) {
399 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
400 }
401 }
402
403 #if defined(TARGET_HAS_ICE)
404 #if defined(CONFIG_USER_ONLY)
405 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
406 {
407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
408 }
409 #else
410 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
411 {
412 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
413 if (phys != -1) {
414 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
415 }
416 }
417 #endif
418 #endif /* TARGET_HAS_ICE */
419
420 #if defined(CONFIG_USER_ONLY)
421 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
422
423 {
424 }
425
426 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
427 int flags, CPUWatchpoint **watchpoint)
428 {
429 return -ENOSYS;
430 }
431 #else
432 /* Add a watchpoint. */
433 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
434 int flags, CPUWatchpoint **watchpoint)
435 {
436 target_ulong len_mask = ~(len - 1);
437 CPUWatchpoint *wp;
438
439 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
440 if ((len & (len - 1)) || (addr & ~len_mask) ||
441 len == 0 || len > TARGET_PAGE_SIZE) {
442 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
443 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
444 return -EINVAL;
445 }
446 wp = g_malloc(sizeof(*wp));
447
448 wp->vaddr = addr;
449 wp->len_mask = len_mask;
450 wp->flags = flags;
451
452 /* keep all GDB-injected watchpoints in front */
453 if (flags & BP_GDB)
454 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
455 else
456 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
457
458 tlb_flush_page(env, addr);
459
460 if (watchpoint)
461 *watchpoint = wp;
462 return 0;
463 }
464
465 /* Remove a specific watchpoint. */
466 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
467 int flags)
468 {
469 target_ulong len_mask = ~(len - 1);
470 CPUWatchpoint *wp;
471
472 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
473 if (addr == wp->vaddr && len_mask == wp->len_mask
474 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
475 cpu_watchpoint_remove_by_ref(env, wp);
476 return 0;
477 }
478 }
479 return -ENOENT;
480 }
481
482 /* Remove a specific watchpoint by reference. */
483 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
484 {
485 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
486
487 tlb_flush_page(env, watchpoint->vaddr);
488
489 g_free(watchpoint);
490 }
491
492 /* Remove all matching watchpoints. */
493 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
494 {
495 CPUWatchpoint *wp, *next;
496
497 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
498 if (wp->flags & mask)
499 cpu_watchpoint_remove_by_ref(env, wp);
500 }
501 }
502 #endif
503
504 /* Add a breakpoint. */
505 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
506 CPUBreakpoint **breakpoint)
507 {
508 #if defined(TARGET_HAS_ICE)
509 CPUBreakpoint *bp;
510
511 bp = g_malloc(sizeof(*bp));
512
513 bp->pc = pc;
514 bp->flags = flags;
515
516 /* keep all GDB-injected breakpoints in front */
517 if (flags & BP_GDB) {
518 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
519 } else {
520 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
521 }
522
523 breakpoint_invalidate(ENV_GET_CPU(env), pc);
524
525 if (breakpoint) {
526 *breakpoint = bp;
527 }
528 return 0;
529 #else
530 return -ENOSYS;
531 #endif
532 }
533
534 /* Remove a specific breakpoint. */
535 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
536 {
537 #if defined(TARGET_HAS_ICE)
538 CPUBreakpoint *bp;
539
540 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
541 if (bp->pc == pc && bp->flags == flags) {
542 cpu_breakpoint_remove_by_ref(env, bp);
543 return 0;
544 }
545 }
546 return -ENOENT;
547 #else
548 return -ENOSYS;
549 #endif
550 }
551
552 /* Remove a specific breakpoint by reference. */
553 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
554 {
555 #if defined(TARGET_HAS_ICE)
556 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
557
558 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
559
560 g_free(breakpoint);
561 #endif
562 }
563
564 /* Remove all matching breakpoints. */
565 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
566 {
567 #if defined(TARGET_HAS_ICE)
568 CPUBreakpoint *bp, *next;
569
570 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
571 if (bp->flags & mask)
572 cpu_breakpoint_remove_by_ref(env, bp);
573 }
574 #endif
575 }
576
577 /* enable or disable single step mode. EXCP_DEBUG is returned by the
578 CPU loop after each instruction */
579 void cpu_single_step(CPUState *cpu, int enabled)
580 {
581 #if defined(TARGET_HAS_ICE)
582 if (cpu->singlestep_enabled != enabled) {
583 cpu->singlestep_enabled = enabled;
584 if (kvm_enabled()) {
585 kvm_update_guest_debug(cpu, 0);
586 } else {
587 /* must flush all the translated code to avoid inconsistencies */
588 /* XXX: only flush what is necessary */
589 CPUArchState *env = cpu->env_ptr;
590 tb_flush(env);
591 }
592 }
593 #endif
594 }
595
596 void cpu_abort(CPUArchState *env, const char *fmt, ...)
597 {
598 CPUState *cpu = ENV_GET_CPU(env);
599 va_list ap;
600 va_list ap2;
601
602 va_start(ap, fmt);
603 va_copy(ap2, ap);
604 fprintf(stderr, "qemu: fatal: ");
605 vfprintf(stderr, fmt, ap);
606 fprintf(stderr, "\n");
607 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
608 if (qemu_log_enabled()) {
609 qemu_log("qemu: fatal: ");
610 qemu_log_vprintf(fmt, ap2);
611 qemu_log("\n");
612 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
613 qemu_log_flush();
614 qemu_log_close();
615 }
616 va_end(ap2);
617 va_end(ap);
618 #if defined(CONFIG_USER_ONLY)
619 {
620 struct sigaction act;
621 sigfillset(&act.sa_mask);
622 act.sa_handler = SIG_DFL;
623 sigaction(SIGABRT, &act, NULL);
624 }
625 #endif
626 abort();
627 }
628
629 #if !defined(CONFIG_USER_ONLY)
630 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
631 {
632 RAMBlock *block;
633
634 /* The list is protected by the iothread lock here. */
635 block = ram_list.mru_block;
636 if (block && addr - block->offset < block->length) {
637 goto found;
638 }
639 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
640 if (addr - block->offset < block->length) {
641 goto found;
642 }
643 }
644
645 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
646 abort();
647
648 found:
649 ram_list.mru_block = block;
650 return block;
651 }
652
653 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
654 uintptr_t length)
655 {
656 RAMBlock *block;
657 ram_addr_t start1;
658
659 block = qemu_get_ram_block(start);
660 assert(block == qemu_get_ram_block(end - 1));
661 start1 = (uintptr_t)block->host + (start - block->offset);
662 cpu_tlb_reset_dirty_all(start1, length);
663 }
664
665 /* Note: start and end must be within the same ram block. */
666 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
667 int dirty_flags)
668 {
669 uintptr_t length;
670
671 start &= TARGET_PAGE_MASK;
672 end = TARGET_PAGE_ALIGN(end);
673
674 length = end - start;
675 if (length == 0)
676 return;
677 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
678
679 if (tcg_enabled()) {
680 tlb_reset_dirty_range_all(start, end, length);
681 }
682 }
683
684 static int cpu_physical_memory_set_dirty_tracking(int enable)
685 {
686 int ret = 0;
687 in_migration = enable;
688 return ret;
689 }
690
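/* Build the TLB iotlb value for a page: for RAM it is the page's ram_addr
   ORed with the NOTDIRTY or ROM section index, for MMIO it is the section
   number plus the in-page offset.  Pages with a watchpoint that could trigger
   for this access are redirected to the watch section and flagged TLB_MMIO. */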
691 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
692 MemoryRegionSection *section,
693 target_ulong vaddr,
694 hwaddr paddr, hwaddr xlat,
695 int prot,
696 target_ulong *address)
697 {
698 hwaddr iotlb;
699 CPUWatchpoint *wp;
700
701 if (memory_region_is_ram(section->mr)) {
702 /* Normal RAM. */
703 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
704 + xlat;
705 if (!section->readonly) {
706 iotlb |= PHYS_SECTION_NOTDIRTY;
707 } else {
708 iotlb |= PHYS_SECTION_ROM;
709 }
710 } else {
711 iotlb = section - address_space_memory.dispatch->sections;
712 iotlb += xlat;
713 }
714
715 /* Make accesses to pages with watchpoints go via the
716 watchpoint trap routines. */
717 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
718 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
719 /* Avoid trapping reads of pages with a write breakpoint. */
720 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
721 iotlb = PHYS_SECTION_WATCH + paddr;
722 *address |= TLB_MMIO;
723 break;
724 }
725 }
726 }
727
728 return iotlb;
729 }
730 #endif /* defined(CONFIG_USER_ONLY) */
731
732 #if !defined(CONFIG_USER_ONLY)
733
734 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
735 uint16_t section);
736 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
737
738 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
739
740 /*
741  * Set a custom physical guest memory allocator.
742 * Accelerators with unusual needs may need this. Hopefully, we can
743 * get rid of it eventually.
744 */
745 void phys_mem_set_alloc(void *(*alloc)(size_t))
746 {
747 phys_mem_alloc = alloc;
748 }
749
750 static uint16_t phys_section_add(MemoryRegionSection *section)
751 {
752 /* The physical section number is ORed with a page-aligned
753 * pointer to produce the iotlb entries. Thus it should
754 * never overflow into the page-aligned value.
755 */
756 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
757
758 if (next_map.sections_nb == next_map.sections_nb_alloc) {
759 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
760 16);
761 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
762 next_map.sections_nb_alloc);
763 }
764 next_map.sections[next_map.sections_nb] = *section;
765 memory_region_ref(section->mr);
766 return next_map.sections_nb++;
767 }
768
769 static void phys_section_destroy(MemoryRegion *mr)
770 {
771 memory_region_unref(mr);
772
773 if (mr->subpage) {
774 subpage_t *subpage = container_of(mr, subpage_t, iomem);
775 memory_region_destroy(&subpage->iomem);
776 g_free(subpage);
777 }
778 }
779
780 static void phys_sections_free(PhysPageMap *map)
781 {
782 while (map->sections_nb > 0) {
783 MemoryRegionSection *section = &map->sections[--map->sections_nb];
784 phys_section_destroy(section->mr);
785 }
786 g_free(map->sections);
787 g_free(map->nodes);
788 g_free(map);
789 }
790
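/* Install a MemoryRegionSection that covers only part of a target page: look
   up (or create) the subpage_t container for that page and register the
   section within it. */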
791 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
792 {
793 subpage_t *subpage;
794 hwaddr base = section->offset_within_address_space
795 & TARGET_PAGE_MASK;
796 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
797 next_map.nodes, next_map.sections);
798 MemoryRegionSection subsection = {
799 .offset_within_address_space = base,
800 .size = int128_make64(TARGET_PAGE_SIZE),
801 };
802 hwaddr start, end;
803
804 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
805
806 if (!(existing->mr->subpage)) {
807 subpage = subpage_init(d->as, base);
808 subsection.mr = &subpage->iomem;
809 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
810 phys_section_add(&subsection));
811 } else {
812 subpage = container_of(existing->mr, subpage_t, iomem);
813 }
814 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
815 end = start + int128_get64(section->size) - 1;
816 subpage_register(subpage, start, end, phys_section_add(section));
817 }
818
819
820 static void register_multipage(AddressSpaceDispatch *d,
821 MemoryRegionSection *section)
822 {
823 hwaddr start_addr = section->offset_within_address_space;
824 uint16_t section_index = phys_section_add(section);
825 uint64_t num_pages = int128_get64(int128_rshift(section->size,
826 TARGET_PAGE_BITS));
827
828 assert(num_pages);
829 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
830 }
831
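/* Add a section to the address space under construction: an unaligned head
   and any tail smaller than a page go through register_subpage(), the
   page-aligned middle through register_multipage(). */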
832 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
833 {
834 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
835 AddressSpaceDispatch *d = as->next_dispatch;
836 MemoryRegionSection now = *section, remain = *section;
837 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
838
839 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
840 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
841 - now.offset_within_address_space;
842
843 now.size = int128_min(int128_make64(left), now.size);
844 register_subpage(d, &now);
845 } else {
846 now.size = int128_zero();
847 }
848 while (int128_ne(remain.size, now.size)) {
849 remain.size = int128_sub(remain.size, now.size);
850 remain.offset_within_address_space += int128_get64(now.size);
851 remain.offset_within_region += int128_get64(now.size);
852 now = remain;
853 if (int128_lt(remain.size, page_size)) {
854 register_subpage(d, &now);
855 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
856 now.size = page_size;
857 register_subpage(d, &now);
858 } else {
859 now.size = int128_and(now.size, int128_neg(page_size));
860 register_multipage(d, &now);
861 }
862 }
863 }
864
865 void qemu_flush_coalesced_mmio_buffer(void)
866 {
867 if (kvm_enabled())
868 kvm_flush_coalesced_mmio_buffer();
869 }
870
871 void qemu_mutex_lock_ramlist(void)
872 {
873 qemu_mutex_lock(&ram_list.mutex);
874 }
875
876 void qemu_mutex_unlock_ramlist(void)
877 {
878 qemu_mutex_unlock(&ram_list.mutex);
879 }
880
881 #ifdef __linux__
882
883 #include <sys/vfs.h>
884
885 #define HUGETLBFS_MAGIC 0x958458f6
886
887 static long gethugepagesize(const char *path)
888 {
889 struct statfs fs;
890 int ret;
891
892 do {
893 ret = statfs(path, &fs);
894 } while (ret != 0 && errno == EINTR);
895
896 if (ret != 0) {
897 perror(path);
898 return 0;
899 }
900
901 if (fs.f_type != HUGETLBFS_MAGIC)
902 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
903
904 return fs.f_bsize;
905 }
906
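/* Allocate guest RAM from a hugetlbfs (or other) mount at 'path': create an
   unlinked temporary file there, size it, and mmap() it.  Returns NULL on
   failure so the caller can fall back to anonymous memory. */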
907 static void *file_ram_alloc(RAMBlock *block,
908 ram_addr_t memory,
909 const char *path)
910 {
911 char *filename;
912 char *sanitized_name;
913 char *c;
914 void *area;
915 int fd;
916 #ifdef MAP_POPULATE
917 int flags;
918 #endif
919 unsigned long hpagesize;
920
921 hpagesize = gethugepagesize(path);
922 if (!hpagesize) {
923 return NULL;
924 }
925
926 if (memory < hpagesize) {
927 return NULL;
928 }
929
930 if (kvm_enabled() && !kvm_has_sync_mmu()) {
931 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
932 return NULL;
933 }
934
935 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
936 sanitized_name = g_strdup(block->mr->name);
937 for (c = sanitized_name; *c != '\0'; c++) {
938 if (*c == '/')
939 *c = '_';
940 }
941
942 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
943 sanitized_name);
944 g_free(sanitized_name);
945
946 fd = mkstemp(filename);
947 if (fd < 0) {
948 perror("unable to create backing store for hugepages");
949 g_free(filename);
950 return NULL;
951 }
952 unlink(filename);
953 g_free(filename);
954
955 memory = (memory+hpagesize-1) & ~(hpagesize-1);
956
957 /*
958 * ftruncate is not supported by hugetlbfs in older
959 * hosts, so don't bother bailing out on errors.
960 * If anything goes wrong with it under other filesystems,
961 * mmap will fail.
962 */
963 if (ftruncate(fd, memory))
964 perror("ftruncate");
965
966 #ifdef MAP_POPULATE
967     /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
968      * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
969 * to sidestep this quirk.
970 */
971 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
972 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
973 #else
974 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
975 #endif
976 if (area == MAP_FAILED) {
977 perror("file_ram_alloc: can't mmap RAM pages");
978 close(fd);
979 return (NULL);
980 }
981 block->fd = fd;
982 return area;
983 }
984 #else
985 static void *file_ram_alloc(RAMBlock *block,
986 ram_addr_t memory,
987 const char *path)
988 {
989 fprintf(stderr, "-mem-path not supported on this host\n");
990 exit(1);
991 }
992 #endif
993
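/* Best-fit search over ram_addr_t space: return the start of the smallest
   gap between existing RAM blocks that can hold 'size' bytes. */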
994 static ram_addr_t find_ram_offset(ram_addr_t size)
995 {
996 RAMBlock *block, *next_block;
997 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
998
999     assert(size != 0); /* it would hand out the same offset multiple times */
1000
1001 if (QTAILQ_EMPTY(&ram_list.blocks))
1002 return 0;
1003
1004 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1005 ram_addr_t end, next = RAM_ADDR_MAX;
1006
1007 end = block->offset + block->length;
1008
1009 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1010 if (next_block->offset >= end) {
1011 next = MIN(next, next_block->offset);
1012 }
1013 }
1014 if (next - end >= size && next - end < mingap) {
1015 offset = end;
1016 mingap = next - end;
1017 }
1018 }
1019
1020 if (offset == RAM_ADDR_MAX) {
1021 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1022 (uint64_t)size);
1023 abort();
1024 }
1025
1026 return offset;
1027 }
1028
1029 ram_addr_t last_ram_offset(void)
1030 {
1031 RAMBlock *block;
1032 ram_addr_t last = 0;
1033
1034 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1035 last = MAX(last, block->offset + block->length);
1036
1037 return last;
1038 }
1039
1040 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1041 {
1042 int ret;
1043
1044     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1045 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1046 "dump-guest-core", true)) {
1047 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1048 if (ret) {
1049 perror("qemu_madvise");
1050 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1051 "but dump_guest_core=off specified\n");
1052 }
1053 }
1054 }
1055
1056 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1057 {
1058 RAMBlock *new_block, *block;
1059
1060 new_block = NULL;
1061 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1062 if (block->offset == addr) {
1063 new_block = block;
1064 break;
1065 }
1066 }
1067 assert(new_block);
1068 assert(!new_block->idstr[0]);
1069
1070 if (dev) {
1071 char *id = qdev_get_dev_path(dev);
1072 if (id) {
1073 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1074 g_free(id);
1075 }
1076 }
1077 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1078
1079 /* This assumes the iothread lock is taken here too. */
1080 qemu_mutex_lock_ramlist();
1081 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1082 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1083 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1084 new_block->idstr);
1085 abort();
1086 }
1087 }
1088 qemu_mutex_unlock_ramlist();
1089 }
1090
1091 static int memory_try_enable_merging(void *addr, size_t len)
1092 {
1093 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1094 /* disabled by the user */
1095 return 0;
1096 }
1097
1098 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1099 }
1100
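/* Create a RAM block of 'size' bytes, backed either by caller-provided memory
   ('host'), Xen, -mem-path, or the configured phys_mem_alloc; link it into the
   block list (kept sorted largest first) and extend the dirty bitmap over it. */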
1101 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1102 MemoryRegion *mr)
1103 {
1104 RAMBlock *block, *new_block;
1105
1106 size = TARGET_PAGE_ALIGN(size);
1107 new_block = g_malloc0(sizeof(*new_block));
1108 new_block->fd = -1;
1109
1110 /* This assumes the iothread lock is taken here too. */
1111 qemu_mutex_lock_ramlist();
1112 new_block->mr = mr;
1113 new_block->offset = find_ram_offset(size);
1114 if (host) {
1115 new_block->host = host;
1116 new_block->flags |= RAM_PREALLOC_MASK;
1117 } else if (xen_enabled()) {
1118 if (mem_path) {
1119 fprintf(stderr, "-mem-path not supported with Xen\n");
1120 exit(1);
1121 }
1122 xen_ram_alloc(new_block->offset, size, mr);
1123 } else {
1124 if (mem_path) {
1125 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1126 /*
1127 * file_ram_alloc() needs to allocate just like
1128 * phys_mem_alloc, but we haven't bothered to provide
1129 * a hook there.
1130 */
1131 fprintf(stderr,
1132 "-mem-path not supported with this accelerator\n");
1133 exit(1);
1134 }
1135 new_block->host = file_ram_alloc(new_block, size, mem_path);
1136 }
1137 if (!new_block->host) {
1138 new_block->host = phys_mem_alloc(size);
1139 if (!new_block->host) {
1140 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1141 new_block->mr->name, strerror(errno));
1142 exit(1);
1143 }
1144 memory_try_enable_merging(new_block->host, size);
1145 }
1146 }
1147 new_block->length = size;
1148
1149 /* Keep the list sorted from biggest to smallest block. */
1150 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1151 if (block->length < new_block->length) {
1152 break;
1153 }
1154 }
1155 if (block) {
1156 QTAILQ_INSERT_BEFORE(block, new_block, next);
1157 } else {
1158 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1159 }
1160 ram_list.mru_block = NULL;
1161
1162 ram_list.version++;
1163 qemu_mutex_unlock_ramlist();
1164
1165 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1166 last_ram_offset() >> TARGET_PAGE_BITS);
1167 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1168 0, size >> TARGET_PAGE_BITS);
1169 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1170
1171 qemu_ram_setup_dump(new_block->host, size);
1172 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1173 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1174
1175 if (kvm_enabled())
1176 kvm_setup_guest_memory(new_block->host, size);
1177
1178 return new_block->offset;
1179 }
1180
1181 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1182 {
1183 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1184 }
1185
1186 void qemu_ram_free_from_ptr(ram_addr_t addr)
1187 {
1188 RAMBlock *block;
1189
1190 /* This assumes the iothread lock is taken here too. */
1191 qemu_mutex_lock_ramlist();
1192 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1193 if (addr == block->offset) {
1194 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1195 ram_list.mru_block = NULL;
1196 ram_list.version++;
1197 g_free(block);
1198 break;
1199 }
1200 }
1201 qemu_mutex_unlock_ramlist();
1202 }
1203
1204 void qemu_ram_free(ram_addr_t addr)
1205 {
1206 RAMBlock *block;
1207
1208 /* This assumes the iothread lock is taken here too. */
1209 qemu_mutex_lock_ramlist();
1210 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1211 if (addr == block->offset) {
1212 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1213 ram_list.mru_block = NULL;
1214 ram_list.version++;
1215 if (block->flags & RAM_PREALLOC_MASK) {
1216 ;
1217 } else if (xen_enabled()) {
1218 xen_invalidate_map_cache_entry(block->host);
1219 #ifndef _WIN32
1220 } else if (block->fd >= 0) {
1221 munmap(block->host, block->length);
1222 close(block->fd);
1223 #endif
1224 } else {
1225 qemu_anon_ram_free(block->host, block->length);
1226 }
1227 g_free(block);
1228 break;
1229 }
1230 }
1231 qemu_mutex_unlock_ramlist();
1232
1233 }
1234
1235 #ifndef _WIN32
1236 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1237 {
1238 RAMBlock *block;
1239 ram_addr_t offset;
1240 int flags;
1241 void *area, *vaddr;
1242
1243 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1244 offset = addr - block->offset;
1245 if (offset < block->length) {
1246 vaddr = block->host + offset;
1247 if (block->flags & RAM_PREALLOC_MASK) {
1248 ;
1249 } else if (xen_enabled()) {
1250 abort();
1251 } else {
1252 flags = MAP_FIXED;
1253 munmap(vaddr, length);
1254 if (block->fd >= 0) {
1255 #ifdef MAP_POPULATE
1256 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1257 MAP_PRIVATE;
1258 #else
1259 flags |= MAP_PRIVATE;
1260 #endif
1261 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1262 flags, block->fd, offset);
1263 } else {
1264 /*
1265 * Remap needs to match alloc. Accelerators that
1266 * set phys_mem_alloc never remap. If they did,
1267 * we'd need a remap hook here.
1268 */
1269 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1270
1271 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1272 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1273 flags, -1, 0);
1274 }
1275 if (area != vaddr) {
1276 fprintf(stderr, "Could not remap addr: "
1277 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1278 length, addr);
1279 exit(1);
1280 }
1281 memory_try_enable_merging(vaddr, length);
1282 qemu_ram_setup_dump(vaddr, length);
1283 }
1284 return;
1285 }
1286 }
1287 }
1288 #endif /* !_WIN32 */
1289
1290 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1291 With the exception of the softmmu code in this file, this should
1292 only be used for local memory (e.g. video ram) that the device owns,
1293 and knows it isn't going to access beyond the end of the block.
1294
1295 It should not be used for general purpose DMA.
1296 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1297 */
1298 void *qemu_get_ram_ptr(ram_addr_t addr)
1299 {
1300 RAMBlock *block = qemu_get_ram_block(addr);
1301
1302 if (xen_enabled()) {
1303         /* We need to check if the requested address is in RAM
1304 * because we don't want to map the entire memory in QEMU.
1305 * In that case just map until the end of the page.
1306 */
1307 if (block->offset == 0) {
1308 return xen_map_cache(addr, 0, 0);
1309 } else if (block->host == NULL) {
1310 block->host =
1311 xen_map_cache(block->offset, block->length, 1);
1312 }
1313 }
1314 return block->host + (addr - block->offset);
1315 }
1316
1317 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1318 * but takes a size argument */
1319 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1320 {
1321 if (*size == 0) {
1322 return NULL;
1323 }
1324 if (xen_enabled()) {
1325 return xen_map_cache(addr, *size, 1);
1326 } else {
1327 RAMBlock *block;
1328
1329 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1330 if (addr - block->offset < block->length) {
1331 if (addr - block->offset + *size > block->length)
1332 *size = block->length - addr + block->offset;
1333 return block->host + (addr - block->offset);
1334 }
1335 }
1336
1337 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1338 abort();
1339 }
1340 }
1341
1342 /* Some of the softmmu routines need to translate from a host pointer
1343 (typically a TLB entry) back to a ram offset. */
1344 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1345 {
1346 RAMBlock *block;
1347 uint8_t *host = ptr;
1348
1349 if (xen_enabled()) {
1350 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1351 return qemu_get_ram_block(*ram_addr)->mr;
1352 }
1353
1354 block = ram_list.mru_block;
1355 if (block && block->host && host - block->host < block->length) {
1356 goto found;
1357 }
1358
1359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1360         /* This case happens when the block is not mapped. */
1361 if (block->host == NULL) {
1362 continue;
1363 }
1364 if (host - block->host < block->length) {
1365 goto found;
1366 }
1367 }
1368
1369 return NULL;
1370
1371 found:
1372 *ram_addr = block->offset + (host - block->host);
1373 return block->mr;
1374 }
1375
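/* Write handler used while a page's CODE_DIRTY_FLAG is clear: invalidate any
   translated code for the page, perform the store, then mark the page dirty
   so later writes can bypass this slow path. */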
1376 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1377 uint64_t val, unsigned size)
1378 {
1379 int dirty_flags;
1380 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1381 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1382 tb_invalidate_phys_page_fast(ram_addr, size);
1383 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1384 }
1385 switch (size) {
1386 case 1:
1387 stb_p(qemu_get_ram_ptr(ram_addr), val);
1388 break;
1389 case 2:
1390 stw_p(qemu_get_ram_ptr(ram_addr), val);
1391 break;
1392 case 4:
1393 stl_p(qemu_get_ram_ptr(ram_addr), val);
1394 break;
1395 default:
1396 abort();
1397 }
1398 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1399 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1400 /* we remove the notdirty callback only if the code has been
1401 flushed */
1402 if (dirty_flags == 0xff) {
1403 CPUArchState *env = current_cpu->env_ptr;
1404 tlb_set_dirty(env, env->mem_io_vaddr);
1405 }
1406 }
1407
1408 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1409 unsigned size, bool is_write)
1410 {
1411 return is_write;
1412 }
1413
1414 static const MemoryRegionOps notdirty_mem_ops = {
1415 .write = notdirty_mem_write,
1416 .valid.accepts = notdirty_mem_accepts,
1417 .endianness = DEVICE_NATIVE_ENDIAN,
1418 };
1419
1420 /* Generate a debug exception if a watchpoint has been hit. */
1421 static void check_watchpoint(int offset, int len_mask, int flags)
1422 {
1423 CPUArchState *env = current_cpu->env_ptr;
1424 target_ulong pc, cs_base;
1425 target_ulong vaddr;
1426 CPUWatchpoint *wp;
1427 int cpu_flags;
1428
1429 if (env->watchpoint_hit) {
1430 /* We re-entered the check after replacing the TB. Now raise
1431          * the debug interrupt so that it will trigger after the
1432 * current instruction. */
1433 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1434 return;
1435 }
1436 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1437 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1438 if ((vaddr == (wp->vaddr & len_mask) ||
1439 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1440 wp->flags |= BP_WATCHPOINT_HIT;
1441 if (!env->watchpoint_hit) {
1442 env->watchpoint_hit = wp;
1443 tb_check_watchpoint(env);
1444 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1445 env->exception_index = EXCP_DEBUG;
1446 cpu_loop_exit(env);
1447 } else {
1448 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1449 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1450 cpu_resume_from_signal(env, NULL);
1451 }
1452 }
1453 } else {
1454 wp->flags &= ~BP_WATCHPOINT_HIT;
1455 }
1456 }
1457 }
1458
1459 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1460 so these check for a hit then pass through to the normal out-of-line
1461 phys routines. */
1462 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1463 unsigned size)
1464 {
1465 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1466 switch (size) {
1467 case 1: return ldub_phys(addr);
1468 case 2: return lduw_phys(addr);
1469 case 4: return ldl_phys(addr);
1470 default: abort();
1471 }
1472 }
1473
1474 static void watch_mem_write(void *opaque, hwaddr addr,
1475 uint64_t val, unsigned size)
1476 {
1477 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1478 switch (size) {
1479 case 1:
1480 stb_phys(addr, val);
1481 break;
1482 case 2:
1483 stw_phys(addr, val);
1484 break;
1485 case 4:
1486 stl_phys(addr, val);
1487 break;
1488 default: abort();
1489 }
1490 }
1491
1492 static const MemoryRegionOps watch_mem_ops = {
1493 .read = watch_mem_read,
1494 .write = watch_mem_write,
1495 .endianness = DEVICE_NATIVE_ENDIAN,
1496 };
1497
1498 static uint64_t subpage_read(void *opaque, hwaddr addr,
1499 unsigned len)
1500 {
1501 subpage_t *subpage = opaque;
1502 uint8_t buf[4];
1503
1504 #if defined(DEBUG_SUBPAGE)
1505 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1506 subpage, len, addr);
1507 #endif
1508 address_space_read(subpage->as, addr + subpage->base, buf, len);
1509 switch (len) {
1510 case 1:
1511 return ldub_p(buf);
1512 case 2:
1513 return lduw_p(buf);
1514 case 4:
1515 return ldl_p(buf);
1516 default:
1517 abort();
1518 }
1519 }
1520
1521 static void subpage_write(void *opaque, hwaddr addr,
1522 uint64_t value, unsigned len)
1523 {
1524 subpage_t *subpage = opaque;
1525 uint8_t buf[4];
1526
1527 #if defined(DEBUG_SUBPAGE)
1528 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1529 " value %"PRIx64"\n",
1530 __func__, subpage, len, addr, value);
1531 #endif
1532 switch (len) {
1533 case 1:
1534 stb_p(buf, value);
1535 break;
1536 case 2:
1537 stw_p(buf, value);
1538 break;
1539 case 4:
1540 stl_p(buf, value);
1541 break;
1542 default:
1543 abort();
1544 }
1545 address_space_write(subpage->as, addr + subpage->base, buf, len);
1546 }
1547
1548 static bool subpage_accepts(void *opaque, hwaddr addr,
1549 unsigned len, bool is_write)
1550 {
1551 subpage_t *subpage = opaque;
1552 #if defined(DEBUG_SUBPAGE)
1553 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1554 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1555 #endif
1556
1557 return address_space_access_valid(subpage->as, addr + subpage->base,
1558 len, is_write);
1559 }
1560
1561 static const MemoryRegionOps subpage_ops = {
1562 .read = subpage_read,
1563 .write = subpage_write,
1564 .valid.accepts = subpage_accepts,
1565 .endianness = DEVICE_NATIVE_ENDIAN,
1566 };
1567
1568 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1569 uint16_t section)
1570 {
1571 int idx, eidx;
1572
1573 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1574 return -1;
1575 idx = SUBPAGE_IDX(start);
1576 eidx = SUBPAGE_IDX(end);
1577 #if defined(DEBUG_SUBPAGE)
1578 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1579 __func__, mmio, start, end, idx, eidx, section);
1580 #endif
1581 for (; idx <= eidx; idx++) {
1582 mmio->sub_section[idx] = section;
1583 }
1584
1585 return 0;
1586 }
1587
1588 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1589 {
1590 subpage_t *mmio;
1591
1592 mmio = g_malloc0(sizeof(subpage_t));
1593
1594 mmio->as = as;
1595 mmio->base = base;
1596 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1597 "subpage", TARGET_PAGE_SIZE);
1598 mmio->iomem.subpage = true;
1599 #if defined(DEBUG_SUBPAGE)
1600 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1601 mmio, base, TARGET_PAGE_SIZE);
1602 #endif
1603 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1604
1605 return mmio;
1606 }
1607
1608 static uint16_t dummy_section(MemoryRegion *mr)
1609 {
1610 MemoryRegionSection section = {
1611 .mr = mr,
1612 .offset_within_address_space = 0,
1613 .offset_within_region = 0,
1614 .size = int128_2_64(),
1615 };
1616
1617 return phys_section_add(&section);
1618 }
1619
1620 MemoryRegion *iotlb_to_region(hwaddr index)
1621 {
1622 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
1623 }
1624
1625 static void io_mem_init(void)
1626 {
1627 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1628 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1629 "unassigned", UINT64_MAX);
1630 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1631 "notdirty", UINT64_MAX);
1632 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1633 "watch", UINT64_MAX);
1634 }
1635
1636 static void mem_begin(MemoryListener *listener)
1637 {
1638 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1639 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1640
1641 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1642 d->as = as;
1643 as->next_dispatch = d;
1644 }
1645
1646 static void mem_commit(MemoryListener *listener)
1647 {
1648 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1649 AddressSpaceDispatch *cur = as->dispatch;
1650 AddressSpaceDispatch *next = as->next_dispatch;
1651
1652 next->nodes = next_map.nodes;
1653 next->sections = next_map.sections;
1654
1655 as->dispatch = next;
1656 g_free(cur);
1657 }
1658
1659 static void core_begin(MemoryListener *listener)
1660 {
1661 uint16_t n;
1662
1663 prev_map = g_new(PhysPageMap, 1);
1664 *prev_map = next_map;
1665
1666 memset(&next_map, 0, sizeof(next_map));
1667 n = dummy_section(&io_mem_unassigned);
1668 assert(n == PHYS_SECTION_UNASSIGNED);
1669 n = dummy_section(&io_mem_notdirty);
1670 assert(n == PHYS_SECTION_NOTDIRTY);
1671 n = dummy_section(&io_mem_rom);
1672 assert(n == PHYS_SECTION_ROM);
1673 n = dummy_section(&io_mem_watch);
1674 assert(n == PHYS_SECTION_WATCH);
1675 }
1676
1677 /* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1678 * All AddressSpaceDispatch instances have switched to the next map.
1679 */
1680 static void core_commit(MemoryListener *listener)
1681 {
1682 phys_sections_free(prev_map);
1683 }
1684
1685 static void tcg_commit(MemoryListener *listener)
1686 {
1687 CPUState *cpu;
1688
1689 /* since each CPU stores ram addresses in its TLB cache, we must
1690 reset the modified entries */
1691 /* XXX: slow ! */
1692 CPU_FOREACH(cpu) {
1693 CPUArchState *env = cpu->env_ptr;
1694
1695 tlb_flush(env, 1);
1696 }
1697 }
1698
1699 static void core_log_global_start(MemoryListener *listener)
1700 {
1701 cpu_physical_memory_set_dirty_tracking(1);
1702 }
1703
1704 static void core_log_global_stop(MemoryListener *listener)
1705 {
1706 cpu_physical_memory_set_dirty_tracking(0);
1707 }
1708
1709 static MemoryListener core_memory_listener = {
1710 .begin = core_begin,
1711 .commit = core_commit,
1712 .log_global_start = core_log_global_start,
1713 .log_global_stop = core_log_global_stop,
1714 .priority = 1,
1715 };
1716
1717 static MemoryListener tcg_memory_listener = {
1718 .commit = tcg_commit,
1719 };
1720
1721 void address_space_init_dispatch(AddressSpace *as)
1722 {
1723 as->dispatch = NULL;
1724 as->dispatch_listener = (MemoryListener) {
1725 .begin = mem_begin,
1726 .commit = mem_commit,
1727 .region_add = mem_add,
1728 .region_nop = mem_add,
1729 .priority = 0,
1730 };
1731 memory_listener_register(&as->dispatch_listener, as);
1732 }
1733
1734 void address_space_destroy_dispatch(AddressSpace *as)
1735 {
1736 AddressSpaceDispatch *d = as->dispatch;
1737
1738 memory_listener_unregister(&as->dispatch_listener);
1739 g_free(d);
1740 as->dispatch = NULL;
1741 }
1742
1743 static void memory_map_init(void)
1744 {
1745 system_memory = g_malloc(sizeof(*system_memory));
1746 memory_region_init(system_memory, NULL, "system", INT64_MAX);
1747 address_space_init(&address_space_memory, system_memory, "memory");
1748
1749 system_io = g_malloc(sizeof(*system_io));
1750 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1751 65536);
1752 address_space_init(&address_space_io, system_io, "I/O");
1753
1754 memory_listener_register(&core_memory_listener, &address_space_memory);
1755 if (tcg_enabled()) {
1756 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1757 }
1758 }
1759
1760 MemoryRegion *get_system_memory(void)
1761 {
1762 return system_memory;
1763 }
1764
1765 MemoryRegion *get_system_io(void)
1766 {
1767 return system_io;
1768 }
1769
1770 #endif /* !defined(CONFIG_USER_ONLY) */
1771
1772 /* physical memory access (slow version, mainly for debug) */
1773 #if defined(CONFIG_USER_ONLY)
1774 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1775 uint8_t *buf, int len, int is_write)
1776 {
1777 int l, flags;
1778 target_ulong page;
1779 void * p;
1780
1781 while (len > 0) {
1782 page = addr & TARGET_PAGE_MASK;
1783 l = (page + TARGET_PAGE_SIZE) - addr;
1784 if (l > len)
1785 l = len;
1786 flags = page_get_flags(page);
1787 if (!(flags & PAGE_VALID))
1788 return -1;
1789 if (is_write) {
1790 if (!(flags & PAGE_WRITE))
1791 return -1;
1792 /* XXX: this code should not depend on lock_user */
1793 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1794 return -1;
1795 memcpy(p, buf, l);
1796 unlock_user(p, addr, l);
1797 } else {
1798 if (!(flags & PAGE_READ))
1799 return -1;
1800 /* XXX: this code should not depend on lock_user */
1801 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1802 return -1;
1803 memcpy(buf, p, l);
1804 unlock_user(p, addr, 0);
1805 }
1806 len -= l;
1807 buf += l;
1808 addr += l;
1809 }
1810 return 0;
1811 }
1812
1813 #else
1814
1815 static void invalidate_and_set_dirty(hwaddr addr,
1816 hwaddr length)
1817 {
1818 if (!cpu_physical_memory_is_dirty(addr)) {
1819 /* invalidate code */
1820 tb_invalidate_phys_page_range(addr, addr + length, 0);
1821 /* set dirty bit */
1822 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1823 }
1824 xen_modified_memory(addr, length);
1825 }
1826
1827 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1828 {
1829 if (memory_region_is_ram(mr)) {
1830 return !(is_write && mr->readonly);
1831 }
1832 if (memory_region_is_romd(mr)) {
1833 return !is_write;
1834 }
1835
1836 return false;
1837 }
1838
1839 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1840 {
1841 unsigned access_size_max = mr->ops->valid.max_access_size;
1842
1843 /* Regions are assumed to support 1-4 byte accesses unless
1844 otherwise specified. */
1845 if (access_size_max == 0) {
1846 access_size_max = 4;
1847 }
1848
1849 /* Bound the maximum access by the alignment of the address. */
1850 if (!mr->ops->impl.unaligned) {
1851 unsigned align_size_max = addr & -addr;
1852 if (align_size_max != 0 && align_size_max < access_size_max) {
1853 access_size_max = align_size_max;
1854 }
1855 }
1856
1857 /* Don't attempt accesses larger than the maximum. */
1858 if (l > access_size_max) {
1859 l = access_size_max;
1860 }
1861 if (l & (l - 1)) {
1862 l = 1 << (qemu_fls(l) - 1);
1863 }
1864
1865 return l;
1866 }
1867
1868 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1869 int len, bool is_write)
1870 {
1871 hwaddr l;
1872 uint8_t *ptr;
1873 uint64_t val;
1874 hwaddr addr1;
1875 MemoryRegion *mr;
1876 bool error = false;
1877
1878 while (len > 0) {
1879 l = len;
1880 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1881
1882 if (is_write) {
1883 if (!memory_access_is_direct(mr, is_write)) {
1884 l = memory_access_size(mr, l, addr1);
1885 /* XXX: could force current_cpu to NULL to avoid
1886 potential bugs */
1887 switch (l) {
1888 case 8:
1889 /* 64 bit write access */
1890 val = ldq_p(buf);
1891 error |= io_mem_write(mr, addr1, val, 8);
1892 break;
1893 case 4:
1894 /* 32 bit write access */
1895 val = ldl_p(buf);
1896 error |= io_mem_write(mr, addr1, val, 4);
1897 break;
1898 case 2:
1899 /* 16 bit write access */
1900 val = lduw_p(buf);
1901 error |= io_mem_write(mr, addr1, val, 2);
1902 break;
1903 case 1:
1904 /* 8 bit write access */
1905 val = ldub_p(buf);
1906 error |= io_mem_write(mr, addr1, val, 1);
1907 break;
1908 default:
1909 abort();
1910 }
1911 } else {
1912 addr1 += memory_region_get_ram_addr(mr);
1913 /* RAM case */
1914 ptr = qemu_get_ram_ptr(addr1);
1915 memcpy(ptr, buf, l);
1916 invalidate_and_set_dirty(addr1, l);
1917 }
1918 } else {
1919 if (!memory_access_is_direct(mr, is_write)) {
1920 /* I/O case */
1921 l = memory_access_size(mr, l, addr1);
1922 switch (l) {
1923 case 8:
1924 /* 64 bit read access */
1925 error |= io_mem_read(mr, addr1, &val, 8);
1926 stq_p(buf, val);
1927 break;
1928 case 4:
1929 /* 32 bit read access */
1930 error |= io_mem_read(mr, addr1, &val, 4);
1931 stl_p(buf, val);
1932 break;
1933 case 2:
1934 /* 16 bit read access */
1935 error |= io_mem_read(mr, addr1, &val, 2);
1936 stw_p(buf, val);
1937 break;
1938 case 1:
1939 /* 8 bit read access */
1940 error |= io_mem_read(mr, addr1, &val, 1);
1941 stb_p(buf, val);
1942 break;
1943 default:
1944 abort();
1945 }
1946 } else {
1947 /* RAM case */
1948 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1949 memcpy(buf, ptr, l);
1950 }
1951 }
1952 len -= l;
1953 buf += l;
1954 addr += l;
1955 }
1956
1957 return error;
1958 }
1959
1960 bool address_space_write(AddressSpace *as, hwaddr addr,
1961 const uint8_t *buf, int len)
1962 {
1963 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
1964 }
1965
1966 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1967 {
1968 return address_space_rw(as, addr, buf, len, false);
1969 }
1970
1971
1972 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1973 int len, int is_write)
1974 {
1975 address_space_rw(&address_space_memory, addr, buf, len, is_write);
1976 }
1977
1978 /* used for ROM loading: can write in RAM and ROM */
1979 void cpu_physical_memory_write_rom(hwaddr addr,
1980 const uint8_t *buf, int len)
1981 {
1982 hwaddr l;
1983 uint8_t *ptr;
1984 hwaddr addr1;
1985 MemoryRegion *mr;
1986
1987 while (len > 0) {
1988 l = len;
1989 mr = address_space_translate(&address_space_memory,
1990 addr, &addr1, &l, true);
1991
1992 if (!(memory_region_is_ram(mr) ||
1993 memory_region_is_romd(mr))) {
1994 /* do nothing */
1995 } else {
1996 addr1 += memory_region_get_ram_addr(mr);
1997 /* ROM/RAM case */
1998 ptr = qemu_get_ram_ptr(addr1);
1999 memcpy(ptr, buf, l);
2000 invalidate_and_set_dirty(addr1, l);
2001 }
2002 len -= l;
2003 buf += l;
2004 addr += l;
2005 }
2006 }
2007
2008 typedef struct {
2009 MemoryRegion *mr;
2010 void *buffer;
2011 hwaddr addr;
2012 hwaddr len;
2013 } BounceBuffer;
2014
2015 static BounceBuffer bounce;
2016
2017 typedef struct MapClient {
2018 void *opaque;
2019 void (*callback)(void *opaque);
2020 QLIST_ENTRY(MapClient) link;
2021 } MapClient;
2022
2023 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2024 = QLIST_HEAD_INITIALIZER(map_client_list);
2025
2026 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2027 {
2028 MapClient *client = g_malloc(sizeof(*client));
2029
2030 client->opaque = opaque;
2031 client->callback = callback;
2032 QLIST_INSERT_HEAD(&map_client_list, client, link);
2033 return client;
2034 }
2035
2036 static void cpu_unregister_map_client(void *_client)
2037 {
2038 MapClient *client = (MapClient *)_client;
2039
2040 QLIST_REMOVE(client, link);
2041 g_free(client);
2042 }
2043
2044 static void cpu_notify_map_clients(void)
2045 {
2046 MapClient *client;
2047
2048 while (!QLIST_EMPTY(&map_client_list)) {
2049 client = QLIST_FIRST(&map_client_list);
2050 client->callback(client->opaque);
2051 cpu_unregister_map_client(client);
2052 }
2053 }
2054
2055 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2056 {
2057 MemoryRegion *mr;
2058 hwaddr l, xlat;
2059
2060 while (len > 0) {
2061 l = len;
2062 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2063 if (!memory_access_is_direct(mr, is_write)) {
2064 l = memory_access_size(mr, l, addr);
2065 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2066 return false;
2067 }
2068 }
2069
2070 len -= l;
2071 addr += l;
2072 }
2073 return true;
2074 }
2075
2076 /* Map a physical memory region into a host virtual address.
2077 * May map a subset of the requested range, given by and returned in *plen.
2078 * May return NULL if resources needed to perform the mapping are exhausted.
2079 * Use only for reads OR writes - not for read-modify-write operations.
2080 * Use cpu_register_map_client() to know when retrying the map operation is
2081 * likely to succeed.
2082 */
2083 void *address_space_map(AddressSpace *as,
2084 hwaddr addr,
2085 hwaddr *plen,
2086 bool is_write)
2087 {
2088 hwaddr len = *plen;
2089 hwaddr done = 0;
2090 hwaddr l, xlat, base;
2091 MemoryRegion *mr, *this_mr;
2092 ram_addr_t raddr;
2093
2094 if (len == 0) {
2095 return NULL;
2096 }
2097
2098 l = len;
2099 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2100 if (!memory_access_is_direct(mr, is_write)) {
2101 if (bounce.buffer) {
2102 return NULL;
2103 }
2104 /* Avoid unbounded allocations */
2105 l = MIN(l, TARGET_PAGE_SIZE);
2106 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2107 bounce.addr = addr;
2108 bounce.len = l;
2109
2110 memory_region_ref(mr);
2111 bounce.mr = mr;
2112 if (!is_write) {
2113 address_space_read(as, addr, bounce.buffer, l);
2114 }
2115
2116 *plen = l;
2117 return bounce.buffer;
2118 }
2119
2120 base = xlat;
2121 raddr = memory_region_get_ram_addr(mr);
2122
2123 for (;;) {
2124 len -= l;
2125 addr += l;
2126 done += l;
2127 if (len == 0) {
2128 break;
2129 }
2130
2131 l = len;
2132 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2133 if (this_mr != mr || xlat != base + done) {
2134 break;
2135 }
2136 }
2137
2138 memory_region_ref(mr);
2139 *plen = done;
2140 return qemu_ram_ptr_length(raddr + base, plen);
2141 }
2142
2143 /* Unmaps a memory region previously mapped by address_space_map().
2144 * Will also mark the memory as dirty if is_write == 1. access_len gives
2145 * the amount of memory that was actually read or written by the caller.
2146 */
2147 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2148 int is_write, hwaddr access_len)
2149 {
2150 if (buffer != bounce.buffer) {
2151 MemoryRegion *mr;
2152 ram_addr_t addr1;
2153
2154 mr = qemu_ram_addr_from_host(buffer, &addr1);
2155 assert(mr != NULL);
2156 if (is_write) {
2157 while (access_len) {
2158 unsigned l;
2159 l = TARGET_PAGE_SIZE;
2160 if (l > access_len)
2161 l = access_len;
2162 invalidate_and_set_dirty(addr1, l);
2163 addr1 += l;
2164 access_len -= l;
2165 }
2166 }
2167 if (xen_enabled()) {
2168 xen_invalidate_map_cache_entry(buffer);
2169 }
2170 memory_region_unref(mr);
2171 return;
2172 }
2173 if (is_write) {
2174 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2175 }
2176 qemu_vfree(bounce.buffer);
2177 bounce.buffer = NULL;
2178 memory_region_unref(bounce.mr);
2179 cpu_notify_map_clients();
2180 }
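
/* Editor's note: a minimal sketch (hypothetical helper name) of the usual
 * map -> access -> unmap pattern around the two functions above.  Note that
 * the mapped length may come back smaller than requested, and a NULL return
 * means "retry later" via cpu_register_map_client() earlier in this file.
 */
static bool fill_guest_buffer(AddressSpace *as, hwaddr addr, hwaddr len,
                              uint8_t pattern)
{
    hwaddr maplen = len;
    void *host = address_space_map(as, addr, &maplen, true /* is_write */);

    if (!host) {
        return false;              /* bounce buffer busy: retry later */
    }
    memset(host, pattern, maplen); /* possibly only a partial mapping */
    /* access_len tells unmap how much memory to mark dirty */
    address_space_unmap(as, host, maplen, 1 /* is_write */, maplen);
    return maplen == len;
}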
2181
2182 void *cpu_physical_memory_map(hwaddr addr,
2183 hwaddr *plen,
2184 int is_write)
2185 {
2186 return address_space_map(&address_space_memory, addr, plen, is_write);
2187 }
2188
2189 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2190 int is_write, hwaddr access_len)
2191 {
2192 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2193 }
2194
2195 /* warning: addr must be aligned */
2196 static inline uint32_t ldl_phys_internal(hwaddr addr,
2197 enum device_endian endian)
2198 {
2199 uint8_t *ptr;
2200 uint64_t val;
2201 MemoryRegion *mr;
2202 hwaddr l = 4;
2203 hwaddr addr1;
2204
2205 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2206 false);
2207 if (l < 4 || !memory_access_is_direct(mr, false)) {
2208 /* I/O case */
2209 io_mem_read(mr, addr1, &val, 4);
2210 #if defined(TARGET_WORDS_BIGENDIAN)
2211 if (endian == DEVICE_LITTLE_ENDIAN) {
2212 val = bswap32(val);
2213 }
2214 #else
2215 if (endian == DEVICE_BIG_ENDIAN) {
2216 val = bswap32(val);
2217 }
2218 #endif
2219 } else {
2220 /* RAM case */
2221 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2222 & TARGET_PAGE_MASK)
2223 + addr1);
2224 switch (endian) {
2225 case DEVICE_LITTLE_ENDIAN:
2226 val = ldl_le_p(ptr);
2227 break;
2228 case DEVICE_BIG_ENDIAN:
2229 val = ldl_be_p(ptr);
2230 break;
2231 default:
2232 val = ldl_p(ptr);
2233 break;
2234 }
2235 }
2236 return val;
2237 }
2238
2239 uint32_t ldl_phys(hwaddr addr)
2240 {
2241 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2242 }
2243
2244 uint32_t ldl_le_phys(hwaddr addr)
2245 {
2246 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2247 }
2248
2249 uint32_t ldl_be_phys(hwaddr addr)
2250 {
2251 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2252 }
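
/* Editor's note: illustrative only.  The three accessors above differ solely
 * in byte order: ldl_phys() uses the target's native order, while the
 * _le/_be variants force a fixed order, which is what a device model with an
 * architecture-independent register layout usually wants.  The doorbell
 * helper below is a made-up example.
 */
static uint32_t read_le_doorbell(hwaddr reg_addr)
{
    /* always little-endian, regardless of TARGET_WORDS_BIGENDIAN */
    return ldl_le_phys(reg_addr);
}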
2253
2254 /* warning: addr must be aligned */
2255 static inline uint64_t ldq_phys_internal(hwaddr addr,
2256 enum device_endian endian)
2257 {
2258 uint8_t *ptr;
2259 uint64_t val;
2260 MemoryRegion *mr;
2261 hwaddr l = 8;
2262 hwaddr addr1;
2263
2264 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2265 false);
2266 if (l < 8 || !memory_access_is_direct(mr, false)) {
2267 /* I/O case */
2268 io_mem_read(mr, addr1, &val, 8);
2269 #if defined(TARGET_WORDS_BIGENDIAN)
2270 if (endian == DEVICE_LITTLE_ENDIAN) {
2271 val = bswap64(val);
2272 }
2273 #else
2274 if (endian == DEVICE_BIG_ENDIAN) {
2275 val = bswap64(val);
2276 }
2277 #endif
2278 } else {
2279 /* RAM case */
2280 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2281 & TARGET_PAGE_MASK)
2282 + addr1);
2283 switch (endian) {
2284 case DEVICE_LITTLE_ENDIAN:
2285 val = ldq_le_p(ptr);
2286 break;
2287 case DEVICE_BIG_ENDIAN:
2288 val = ldq_be_p(ptr);
2289 break;
2290 default:
2291 val = ldq_p(ptr);
2292 break;
2293 }
2294 }
2295 return val;
2296 }
2297
2298 uint64_t ldq_phys(hwaddr addr)
2299 {
2300 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2301 }
2302
2303 uint64_t ldq_le_phys(hwaddr addr)
2304 {
2305 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2306 }
2307
2308 uint64_t ldq_be_phys(hwaddr addr)
2309 {
2310 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2311 }
2312
2313 /* XXX: optimize */
2314 uint32_t ldub_phys(hwaddr addr)
2315 {
2316 uint8_t val;
2317 cpu_physical_memory_read(addr, &val, 1);
2318 return val;
2319 }
2320
2321 /* warning: addr must be aligned */
2322 static inline uint32_t lduw_phys_internal(hwaddr addr,
2323 enum device_endian endian)
2324 {
2325 uint8_t *ptr;
2326 uint64_t val;
2327 MemoryRegion *mr;
2328 hwaddr l = 2;
2329 hwaddr addr1;
2330
2331 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2332 false);
2333 if (l < 2 || !memory_access_is_direct(mr, false)) {
2334 /* I/O case */
2335 io_mem_read(mr, addr1, &val, 2);
2336 #if defined(TARGET_WORDS_BIGENDIAN)
2337 if (endian == DEVICE_LITTLE_ENDIAN) {
2338 val = bswap16(val);
2339 }
2340 #else
2341 if (endian == DEVICE_BIG_ENDIAN) {
2342 val = bswap16(val);
2343 }
2344 #endif
2345 } else {
2346 /* RAM case */
2347 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2348 & TARGET_PAGE_MASK)
2349 + addr1);
2350 switch (endian) {
2351 case DEVICE_LITTLE_ENDIAN:
2352 val = lduw_le_p(ptr);
2353 break;
2354 case DEVICE_BIG_ENDIAN:
2355 val = lduw_be_p(ptr);
2356 break;
2357 default:
2358 val = lduw_p(ptr);
2359 break;
2360 }
2361 }
2362 return val;
2363 }
2364
2365 uint32_t lduw_phys(hwaddr addr)
2366 {
2367 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2368 }
2369
2370 uint32_t lduw_le_phys(hwaddr addr)
2371 {
2372 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2373 }
2374
2375 uint32_t lduw_be_phys(hwaddr addr)
2376 {
2377 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2378 }
2379
2380 /* warning: addr must be aligned. The RAM page is not marked as dirty
2381    and the code inside is not invalidated. This is useful when the dirty
2382    bits are used to track modified PTEs */
2383 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2384 {
2385 uint8_t *ptr;
2386 MemoryRegion *mr;
2387 hwaddr l = 4;
2388 hwaddr addr1;
2389
2390 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2391 true);
2392 if (l < 4 || !memory_access_is_direct(mr, true)) {
2393 io_mem_write(mr, addr1, val, 4);
2394 } else {
2395 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2396 ptr = qemu_get_ram_ptr(addr1);
2397 stl_p(ptr, val);
2398
2399 if (unlikely(in_migration)) {
2400 if (!cpu_physical_memory_is_dirty(addr1)) {
2401 /* invalidate code */
2402 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2403 /* set dirty bit */
2404 cpu_physical_memory_set_dirty_flags(
2405 addr1, (0xff & ~CODE_DIRTY_FLAG));
2406 }
2407 }
2408 }
2409 }
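
/* Editor's note: sketch of where the _notdirty variant matters, with
 * illustrative names and a hypothetical accessed-bit mask.  A softmmu
 * page-table walker updating accessed/dirty bits in a guest PTE uses
 * stl_phys_notdirty() so the write does not by itself trigger TB
 * invalidation; the in_migration path above still records the change so
 * migration does not miss the page.
 */
static void mark_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}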
2410
2411 /* warning: addr must be aligned */
2412 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2413 enum device_endian endian)
2414 {
2415 uint8_t *ptr;
2416 MemoryRegion *mr;
2417 hwaddr l = 4;
2418 hwaddr addr1;
2419
2420 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2421 true);
2422 if (l < 4 || !memory_access_is_direct(mr, true)) {
2423 #if defined(TARGET_WORDS_BIGENDIAN)
2424 if (endian == DEVICE_LITTLE_ENDIAN) {
2425 val = bswap32(val);
2426 }
2427 #else
2428 if (endian == DEVICE_BIG_ENDIAN) {
2429 val = bswap32(val);
2430 }
2431 #endif
2432 io_mem_write(mr, addr1, val, 4);
2433 } else {
2434 /* RAM case */
2435 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2436 ptr = qemu_get_ram_ptr(addr1);
2437 switch (endian) {
2438 case DEVICE_LITTLE_ENDIAN:
2439 stl_le_p(ptr, val);
2440 break;
2441 case DEVICE_BIG_ENDIAN:
2442 stl_be_p(ptr, val);
2443 break;
2444 default:
2445 stl_p(ptr, val);
2446 break;
2447 }
2448 invalidate_and_set_dirty(addr1, 4);
2449 }
2450 }
2451
2452 void stl_phys(hwaddr addr, uint32_t val)
2453 {
2454 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2455 }
2456
2457 void stl_le_phys(hwaddr addr, uint32_t val)
2458 {
2459 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2460 }
2461
2462 void stl_be_phys(hwaddr addr, uint32_t val)
2463 {
2464 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2465 }
2466
2467 /* XXX: optimize */
2468 void stb_phys(hwaddr addr, uint32_t val)
2469 {
2470 uint8_t v = val;
2471 cpu_physical_memory_write(addr, &v, 1);
2472 }
2473
2474 /* warning: addr must be aligned */
2475 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2476 enum device_endian endian)
2477 {
2478 uint8_t *ptr;
2479 MemoryRegion *mr;
2480 hwaddr l = 2;
2481 hwaddr addr1;
2482
2483 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2484 true);
2485 if (l < 2 || !memory_access_is_direct(mr, true)) {
2486 #if defined(TARGET_WORDS_BIGENDIAN)
2487 if (endian == DEVICE_LITTLE_ENDIAN) {
2488 val = bswap16(val);
2489 }
2490 #else
2491 if (endian == DEVICE_BIG_ENDIAN) {
2492 val = bswap16(val);
2493 }
2494 #endif
2495 io_mem_write(mr, addr1, val, 2);
2496 } else {
2497 /* RAM case */
2498 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2499 ptr = qemu_get_ram_ptr(addr1);
2500 switch (endian) {
2501 case DEVICE_LITTLE_ENDIAN:
2502 stw_le_p(ptr, val);
2503 break;
2504 case DEVICE_BIG_ENDIAN:
2505 stw_be_p(ptr, val);
2506 break;
2507 default:
2508 stw_p(ptr, val);
2509 break;
2510 }
2511 invalidate_and_set_dirty(addr1, 2);
2512 }
2513 }
2514
2515 void stw_phys(hwaddr addr, uint32_t val)
2516 {
2517 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2518 }
2519
2520 void stw_le_phys(hwaddr addr, uint32_t val)
2521 {
2522 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2523 }
2524
2525 void stw_be_phys(hwaddr addr, uint32_t val)
2526 {
2527 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2528 }
2529
2530 /* XXX: optimize */
2531 void stq_phys(hwaddr addr, uint64_t val)
2532 {
2533 val = tswap64(val);
2534 cpu_physical_memory_write(addr, &val, 8);
2535 }
2536
2537 void stq_le_phys(hwaddr addr, uint64_t val)
2538 {
2539 val = cpu_to_le64(val);
2540 cpu_physical_memory_write(addr, &val, 8);
2541 }
2542
2543 void stq_be_phys(hwaddr addr, uint64_t val)
2544 {
2545 val = cpu_to_be64(val);
2546 cpu_physical_memory_write(addr, &val, 8);
2547 }
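
/* Editor's note: hypothetical example tying the store helpers together.  A
 * device model completing a request might publish a status word and a
 * 64-bit timestamp into a guest-visible, little-endian descriptor; the
 * byte swapping is handled inside st*_le_phys().
 */
static void complete_request(hwaddr desc, uint32_t status, uint64_t timestamp)
{
    stl_le_phys(desc + 0, status);
    stq_le_phys(desc + 4, timestamp);
}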
2548
2549 /* virtual memory access for debug (includes writing to ROM) */
2550 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2551 uint8_t *buf, int len, int is_write)
2552 {
2553 int l;
2554 hwaddr phys_addr;
2555 target_ulong page;
2556
2557 while (len > 0) {
2558 page = addr & TARGET_PAGE_MASK;
2559 phys_addr = cpu_get_phys_page_debug(cpu, page);
2560 /* if no physical page mapped, return an error */
2561 if (phys_addr == -1)
2562 return -1;
2563 l = (page + TARGET_PAGE_SIZE) - addr;
2564 if (l > len)
2565 l = len;
2566 phys_addr += (addr & ~TARGET_PAGE_MASK);
2567 if (is_write)
2568 cpu_physical_memory_write_rom(phys_addr, buf, l);
2569 else
2570 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2571 len -= l;
2572 buf += l;
2573 addr += l;
2574 }
2575 return 0;
2576 }
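
/* Editor's note: minimal sketch (names assumed) of how a debugger front end
 * such as the gdbstub uses cpu_memory_rw_debug().  The address is a guest
 * *virtual* address, translated page by page above, and debug writes go
 * through cpu_physical_memory_write_rom() so that breakpoints can be
 * patched even into ROM.
 */
static int debugger_peek(CPUState *cpu, target_ulong vaddr,
                         uint8_t *out, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0 /* read */);
}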
2577 #endif
2578
2579 #if !defined(CONFIG_USER_ONLY)
2580
2581 /*
2582 * A helper function for the _utterly broken_ virtio device model to find out if
2583 * it's running on a big endian machine. Don't do this at home kids!
2584 */
2585 bool virtio_is_big_endian(void);
2586 bool virtio_is_big_endian(void)
2587 {
2588 #if defined(TARGET_WORDS_BIGENDIAN)
2589 return true;
2590 #else
2591 return false;
2592 #endif
2593 }
2594
2595 #endif
2596
2597 #ifndef CONFIG_USER_ONLY
2598 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2599 {
2600 MemoryRegion *mr;
2601 hwaddr l = 1;
2602
2603 mr = address_space_translate(&address_space_memory,
2604 phys_addr, &phys_addr, &l, false);
2605
2606 return !(memory_region_is_ram(mr) ||
2607 memory_region_is_romd(mr));
2608 }
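
/* Editor's note: brief illustrative use of the predicate above (assumed
 * helper name): a memory-dump path can skip pages whose contents live
 * behind MMIO rather than RAM or ROM.
 */
static bool page_is_dumpable(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}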
2609
2610 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2611 {
2612 RAMBlock *block;
2613
2614 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2615 func(block->host, block->offset, block->length, opaque);
2616 }
2617 }
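
/* Editor's note: a short sketch of the iterator above.  The callback must
 * match the RAMBlockIterFunc type used by qemu_ram_foreach_block(); this
 * example just sums the length of every RAM block (total_ram_bytes() is a
 * hypothetical helper).
 */
static void add_block_length(void *host_addr, ram_addr_t offset,
                             ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t total_ram_bytes(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(add_block_length, &total);
    return total;
}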
2618 #endif