1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "exec/cputlb.h"
53 #include "translate-all.h"
54
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
57
58 #include "qemu/range.h"
59
60 //#define DEBUG_SUBPAGE
61
62 #if !defined(CONFIG_USER_ONLY)
63 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
65 */
66 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
67
68 static MemoryRegion *system_memory;
69 static MemoryRegion *system_io;
70
71 AddressSpace address_space_io;
72 AddressSpace address_space_memory;
73
74 MemoryRegion io_mem_rom, io_mem_notdirty;
75 static MemoryRegion io_mem_unassigned;
76
77 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78 #define RAM_PREALLOC (1 << 0)
79
80 /* RAM is mmap-ed with MAP_SHARED */
81 #define RAM_SHARED (1 << 1)
82
83 /* Only a portion of RAM (used_length) is actually used and migrated;
84 * this used_length can change across reboots.
85 */
86 #define RAM_RESIZEABLE (1 << 2)
87
88 #endif
89
90 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
91 /* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
93 DEFINE_TLS(CPUState *, current_cpu);
94 /* 0 = Do not count executed instructions.
95 1 = Precise instruction counting.
96 2 = Adaptive rate instruction counting. */
97 int use_icount;
98
99 #if !defined(CONFIG_USER_ONLY)
100
101 typedef struct PhysPageEntry PhysPageEntry;
102
103 struct PhysPageEntry {
104 /* How many bits to skip to get to the next level (in units of P_L2_BITS). 0 for a leaf. */
105 uint32_t skip : 6;
106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
107 uint32_t ptr : 26;
108 };
109
110 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
111
112 /* Size of the L2 (and L3, etc) page tables. */
113 #define ADDR_SPACE_BITS 64
114
115 #define P_L2_BITS 9
116 #define P_L2_SIZE (1 << P_L2_BITS)
117
118 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
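/* For example, on a target where TARGET_PAGE_BITS is 12 this works out to
 * ((64 - 12 - 1) / 9) + 1 == 6 levels, i.e. at most six table hops from
 * phys_map down to a leaf MemoryRegionSection index.
 */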
119
120 typedef PhysPageEntry Node[P_L2_SIZE];
121
122 typedef struct PhysPageMap {
123 struct rcu_head rcu;
124
125 unsigned sections_nb;
126 unsigned sections_nb_alloc;
127 unsigned nodes_nb;
128 unsigned nodes_nb_alloc;
129 Node *nodes;
130 MemoryRegionSection *sections;
131 } PhysPageMap;
132
133 struct AddressSpaceDispatch {
134 struct rcu_head rcu;
135
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
138 */
139 PhysPageEntry phys_map;
140 PhysPageMap map;
141 AddressSpace *as;
142 };
143
144 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145 typedef struct subpage_t {
146 MemoryRegion iomem;
147 AddressSpace *as;
148 hwaddr base;
149 uint16_t sub_section[TARGET_PAGE_SIZE];
150 } subpage_t;
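/* sub_section[] holds one section index per byte offset within the page, so
 * SUBPAGE_IDX(addr) (the low TARGET_PAGE_BITS bits of addr) selects the
 * MemoryRegionSection backing that particular byte of the subpage.
 */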
151
152 #define PHYS_SECTION_UNASSIGNED 0
153 #define PHYS_SECTION_NOTDIRTY 1
154 #define PHYS_SECTION_ROM 2
155 #define PHYS_SECTION_WATCH 3
156
157 static void io_mem_init(void);
158 static void memory_map_init(void);
159 static void tcg_commit(MemoryListener *listener);
160
161 static MemoryRegion io_mem_watch;
162 #endif
163
164 #if !defined(CONFIG_USER_ONLY)
165
166 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
167 {
168 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
171 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
172 }
173 }
174
175 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
176 {
177 unsigned i;
178 uint32_t ret;
179 PhysPageEntry e;
180 PhysPageEntry *p;
181
182 ret = map->nodes_nb++;
183 p = map->nodes[ret];
184 assert(ret != PHYS_MAP_NODE_NIL);
185 assert(ret != map->nodes_nb_alloc);
186
187 e.skip = leaf ? 0 : 1;
188 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
189 for (i = 0; i < P_L2_SIZE; ++i) {
190 memcpy(&p[i], &e, sizeof(e));
191 }
192 return ret;
193 }
194
195 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
196 hwaddr *index, hwaddr *nb, uint16_t leaf,
197 int level)
198 {
199 PhysPageEntry *p;
200 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
201
202 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
203 lp->ptr = phys_map_node_alloc(map, level == 0);
204 }
205 p = map->nodes[lp->ptr];
206 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
207
208 while (*nb && lp < &p[P_L2_SIZE]) {
209 if ((*index & (step - 1)) == 0 && *nb >= step) {
210 lp->skip = 0;
211 lp->ptr = leaf;
212 *index += step;
213 *nb -= step;
214 } else {
215 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
216 }
217 ++lp;
218 }
219 }
220
221 static void phys_page_set(AddressSpaceDispatch *d,
222 hwaddr index, hwaddr nb,
223 uint16_t leaf)
224 {
225 /* Wildly overreserve - it doesn't matter much. */
226 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
227
228 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
229 }
230
231 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
233 */
234 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
235 {
236 unsigned valid_ptr = P_L2_SIZE;
237 int valid = 0;
238 PhysPageEntry *p;
239 int i;
240
241 if (lp->ptr == PHYS_MAP_NODE_NIL) {
242 return;
243 }
244
245 p = nodes[lp->ptr];
246 for (i = 0; i < P_L2_SIZE; i++) {
247 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
248 continue;
249 }
250
251 valid_ptr = i;
252 valid++;
253 if (p[i].skip) {
254 phys_page_compact(&p[i], nodes, compacted);
255 }
256 }
257
258 /* We can only compress if there's only one child. */
259 if (valid != 1) {
260 return;
261 }
262
263 assert(valid_ptr < P_L2_SIZE);
264
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
267 return;
268 }
269
270 lp->ptr = p[valid_ptr].ptr;
271 if (!p[valid_ptr].skip) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
276 * change this rule.
277 */
278 lp->skip = 0;
279 } else {
280 lp->skip += p[valid_ptr].skip;
281 }
282 }
283
284 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
285 {
286 DECLARE_BITMAP(compacted, nodes_nb);
287
288 if (d->phys_map.skip) {
289 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
290 }
291 }
292
293 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
294 Node *nodes, MemoryRegionSection *sections)
295 {
296 PhysPageEntry *p;
297 hwaddr index = addr >> TARGET_PAGE_BITS;
298 int i;
299
300 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
301 if (lp.ptr == PHYS_MAP_NODE_NIL) {
302 return &sections[PHYS_SECTION_UNASSIGNED];
303 }
304 p = nodes[lp.ptr];
305 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
306 }
307
308 if (sections[lp.ptr].size.hi ||
309 range_covers_byte(sections[lp.ptr].offset_within_address_space,
310 sections[lp.ptr].size.lo, addr)) {
311 return &sections[lp.ptr];
312 } else {
313 return &sections[PHYS_SECTION_UNASSIGNED];
314 }
315 }
316
317 bool memory_region_is_unassigned(MemoryRegion *mr)
318 {
319 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
320 && mr != &io_mem_watch;
321 }
322
323 /* Called from RCU critical section */
324 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
325 hwaddr addr,
326 bool resolve_subpage)
327 {
328 MemoryRegionSection *section;
329 subpage_t *subpage;
330
331 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
332 if (resolve_subpage && section->mr->subpage) {
333 subpage = container_of(section->mr, subpage_t, iomem);
334 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
335 }
336 return section;
337 }
338
339 /* Called from RCU critical section */
340 static MemoryRegionSection *
341 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
342 hwaddr *plen, bool resolve_subpage)
343 {
344 MemoryRegionSection *section;
345 MemoryRegion *mr;
346 Int128 diff;
347
348 section = address_space_lookup_region(d, addr, resolve_subpage);
349 /* Compute offset within MemoryRegionSection */
350 addr -= section->offset_within_address_space;
351
352 /* Compute offset within MemoryRegion */
353 *xlat = addr + section->offset_within_region;
354
355 mr = section->mr;
356
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
362 * here.
363 *
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
367 */
368 if (memory_region_is_ram(mr)) {
369 diff = int128_sub(section->size, int128_make64(addr));
370 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
371 }
372 return section;
373 }
374
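/* "Direct" means the access can be satisfied by dereferencing the region's
 * host RAM pointer (a plain load/store or memcpy) rather than going through
 * the MemoryRegion read/write callbacks.
 */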
375 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
376 {
377 if (memory_region_is_ram(mr)) {
378 return !(is_write && mr->readonly);
379 }
380 if (memory_region_is_romd(mr)) {
381 return !is_write;
382 }
383
384 return false;
385 }
386
387 /* Called from RCU critical section */
388 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
389 hwaddr *xlat, hwaddr *plen,
390 bool is_write)
391 {
392 IOMMUTLBEntry iotlb;
393 MemoryRegionSection *section;
394 MemoryRegion *mr;
395
396 for (;;) {
397 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
398 section = address_space_translate_internal(d, addr, &addr, plen, true);
399 mr = section->mr;
400
401 if (!mr->iommu_ops) {
402 break;
403 }
404
405 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
406 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
407 | (addr & iotlb.addr_mask));
408 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
409 if (!(iotlb.perm & (1 << is_write))) {
410 mr = &io_mem_unassigned;
411 break;
412 }
413
414 as = iotlb.target_as;
415 }
416
417 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
418 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
419 *plen = MIN(page, *plen);
420 }
421
422 *xlat = addr;
423 return mr;
424 }
425
426 /* Called from RCU critical section */
427 MemoryRegionSection *
428 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
429 hwaddr *xlat, hwaddr *plen)
430 {
431 MemoryRegionSection *section;
432 section = address_space_translate_internal(cpu->memory_dispatch,
433 addr, xlat, plen, false);
434
435 assert(!section->mr->iommu_ops);
436 return section;
437 }
438 #endif
439
440 #if !defined(CONFIG_USER_ONLY)
441
442 static int cpu_common_post_load(void *opaque, int version_id)
443 {
444 CPUState *cpu = opaque;
445
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
448 cpu->interrupt_request &= ~0x01;
449 tlb_flush(cpu, 1);
450
451 return 0;
452 }
453
454 static int cpu_common_pre_load(void *opaque)
455 {
456 CPUState *cpu = opaque;
457
458 cpu->exception_index = -1;
459
460 return 0;
461 }
462
463 static bool cpu_common_exception_index_needed(void *opaque)
464 {
465 CPUState *cpu = opaque;
466
467 return tcg_enabled() && cpu->exception_index != -1;
468 }
469
470 static const VMStateDescription vmstate_cpu_common_exception_index = {
471 .name = "cpu_common/exception_index",
472 .version_id = 1,
473 .minimum_version_id = 1,
474 .needed = cpu_common_exception_index_needed,
475 .fields = (VMStateField[]) {
476 VMSTATE_INT32(exception_index, CPUState),
477 VMSTATE_END_OF_LIST()
478 }
479 };
480
481 const VMStateDescription vmstate_cpu_common = {
482 .name = "cpu_common",
483 .version_id = 1,
484 .minimum_version_id = 1,
485 .pre_load = cpu_common_pre_load,
486 .post_load = cpu_common_post_load,
487 .fields = (VMStateField[]) {
488 VMSTATE_UINT32(halted, CPUState),
489 VMSTATE_UINT32(interrupt_request, CPUState),
490 VMSTATE_END_OF_LIST()
491 },
492 .subsections = (const VMStateDescription*[]) {
493 &vmstate_cpu_common_exception_index,
494 NULL
495 }
496 };
497
498 #endif
499
500 CPUState *qemu_get_cpu(int index)
501 {
502 CPUState *cpu;
503
504 CPU_FOREACH(cpu) {
505 if (cpu->cpu_index == index) {
506 return cpu;
507 }
508 }
509
510 return NULL;
511 }
512
513 #if !defined(CONFIG_USER_ONLY)
514 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
515 {
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu->as == as);
518
519 if (cpu->tcg_as_listener) {
520 memory_listener_unregister(cpu->tcg_as_listener);
521 } else {
522 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
523 }
524 cpu->tcg_as_listener->commit = tcg_commit;
525 memory_listener_register(cpu->tcg_as_listener, as);
526 }
527 #endif
528
529 #ifndef CONFIG_USER_ONLY
530 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
531
532 static int cpu_get_free_index(Error **errp)
533 {
534 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
535
536 if (cpu >= MAX_CPUMASK_BITS) {
537 error_setg(errp, "Trying to use more CPUs than max of %d",
538 MAX_CPUMASK_BITS);
539 return -1;
540 }
541
542 bitmap_set(cpu_index_map, cpu, 1);
543 return cpu;
544 }
545
546 void cpu_exec_exit(CPUState *cpu)
547 {
548 if (cpu->cpu_index == -1) {
549 /* cpu_index was never allocated by this @cpu or was already freed. */
550 return;
551 }
552
553 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
554 cpu->cpu_index = -1;
555 }
556 #else
557
558 static int cpu_get_free_index(Error **errp)
559 {
560 CPUState *some_cpu;
561 int cpu_index = 0;
562
563 CPU_FOREACH(some_cpu) {
564 cpu_index++;
565 }
566 return cpu_index;
567 }
568
569 void cpu_exec_exit(CPUState *cpu)
570 {
571 }
572 #endif
573
574 void cpu_exec_init(CPUState *cpu, Error **errp)
575 {
576 CPUClass *cc = CPU_GET_CLASS(cpu);
577 int cpu_index;
578 Error *local_err = NULL;
579
580 #ifndef CONFIG_USER_ONLY
581 cpu->as = &address_space_memory;
582 cpu->thread_id = qemu_get_thread_id();
583 cpu_reload_memory_map(cpu);
584 #endif
585
586 #if defined(CONFIG_USER_ONLY)
587 cpu_list_lock();
588 #endif
589 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
590 if (local_err) {
591 error_propagate(errp, local_err);
592 #if defined(CONFIG_USER_ONLY)
593 cpu_list_unlock();
594 #endif
595 return;
596 }
597 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
598 #if defined(CONFIG_USER_ONLY)
599 cpu_list_unlock();
600 #endif
601 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
602 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
603 }
604 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
606 cpu_save, cpu_load, cpu->env_ptr);
607 assert(cc->vmsd == NULL);
608 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
609 #endif
610 if (cc->vmsd != NULL) {
611 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
612 }
613 }
614
615 #if defined(CONFIG_USER_ONLY)
616 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
617 {
618 tb_invalidate_phys_page_range(pc, pc + 1, 0);
619 }
620 #else
621 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
622 {
623 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
624 if (phys != -1) {
625 tb_invalidate_phys_addr(cpu->as,
626 phys | (pc & ~TARGET_PAGE_MASK));
627 }
628 }
629 #endif
630
631 #if defined(CONFIG_USER_ONLY)
632 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
633
634 {
635 }
636
637 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
638 int flags)
639 {
640 return -ENOSYS;
641 }
642
643 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
644 {
645 }
646
647 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
648 int flags, CPUWatchpoint **watchpoint)
649 {
650 return -ENOSYS;
651 }
652 #else
653 /* Add a watchpoint. */
654 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
655 int flags, CPUWatchpoint **watchpoint)
656 {
657 CPUWatchpoint *wp;
658
659 /* forbid ranges which are empty or run off the end of the address space */
660 if (len == 0 || (addr + len - 1) < addr) {
661 error_report("tried to set invalid watchpoint at %"
662 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
663 return -EINVAL;
664 }
665 wp = g_malloc(sizeof(*wp));
666
667 wp->vaddr = addr;
668 wp->len = len;
669 wp->flags = flags;
670
671 /* keep all GDB-injected watchpoints in front */
672 if (flags & BP_GDB) {
673 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
674 } else {
675 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
676 }
677
678 tlb_flush_page(cpu, addr);
679
680 if (watchpoint)
681 *watchpoint = wp;
682 return 0;
683 }
684
685 /* Remove a specific watchpoint. */
686 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
687 int flags)
688 {
689 CPUWatchpoint *wp;
690
691 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
692 if (addr == wp->vaddr && len == wp->len
693 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
694 cpu_watchpoint_remove_by_ref(cpu, wp);
695 return 0;
696 }
697 }
698 return -ENOENT;
699 }
700
701 /* Remove a specific watchpoint by reference. */
702 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
703 {
704 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
705
706 tlb_flush_page(cpu, watchpoint->vaddr);
707
708 g_free(watchpoint);
709 }
710
711 /* Remove all matching watchpoints. */
712 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
713 {
714 CPUWatchpoint *wp, *next;
715
716 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
717 if (wp->flags & mask) {
718 cpu_watchpoint_remove_by_ref(cpu, wp);
719 }
720 }
721 }
722
723 /* Return true if this watchpoint address matches the specified
724 * access (ie the address range covered by the watchpoint overlaps
725 * partially or completely with the address range covered by the
726 * access).
727 */
728 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
729 vaddr addr,
730 vaddr len)
731 {
732 /* We know the lengths are non-zero, but a little caution is
733 * required to avoid errors in the case where the range ends
734 * exactly at the top of the address space and so addr + len
735 * wraps round to zero.
736 */
737 vaddr wpend = wp->vaddr + wp->len - 1;
738 vaddr addrend = addr + len - 1;
739
740 return !(addr > wpend || wp->vaddr > addrend);
741 }
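/* Example: a 4-byte watchpoint at 0x1000 (wpend == 0x1003) matches a 2-byte
 * access at 0x1002 (addrend == 0x1003), because neither range starts beyond
 * the end of the other. Working with inclusive end addresses keeps the test
 * correct even when a range ends at the very top of the address space.
 */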
742
743 #endif
744
745 /* Add a breakpoint. */
746 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
747 CPUBreakpoint **breakpoint)
748 {
749 CPUBreakpoint *bp;
750
751 bp = g_malloc(sizeof(*bp));
752
753 bp->pc = pc;
754 bp->flags = flags;
755
756 /* keep all GDB-injected breakpoints in front */
757 if (flags & BP_GDB) {
758 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
759 } else {
760 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
761 }
762
763 breakpoint_invalidate(cpu, pc);
764
765 if (breakpoint) {
766 *breakpoint = bp;
767 }
768 return 0;
769 }
770
771 /* Remove a specific breakpoint. */
772 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
773 {
774 CPUBreakpoint *bp;
775
776 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
777 if (bp->pc == pc && bp->flags == flags) {
778 cpu_breakpoint_remove_by_ref(cpu, bp);
779 return 0;
780 }
781 }
782 return -ENOENT;
783 }
784
785 /* Remove a specific breakpoint by reference. */
786 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
787 {
788 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
789
790 breakpoint_invalidate(cpu, breakpoint->pc);
791
792 g_free(breakpoint);
793 }
794
795 /* Remove all matching breakpoints. */
796 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
797 {
798 CPUBreakpoint *bp, *next;
799
800 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
801 if (bp->flags & mask) {
802 cpu_breakpoint_remove_by_ref(cpu, bp);
803 }
804 }
805 }
806
807 /* enable or disable single step mode. EXCP_DEBUG is returned by the
808 CPU loop after each instruction */
809 void cpu_single_step(CPUState *cpu, int enabled)
810 {
811 if (cpu->singlestep_enabled != enabled) {
812 cpu->singlestep_enabled = enabled;
813 if (kvm_enabled()) {
814 kvm_update_guest_debug(cpu, 0);
815 } else {
816 /* must flush all the translated code to avoid inconsistencies */
817 /* XXX: only flush what is necessary */
818 tb_flush(cpu);
819 }
820 }
821 }
822
823 void cpu_abort(CPUState *cpu, const char *fmt, ...)
824 {
825 va_list ap;
826 va_list ap2;
827
828 va_start(ap, fmt);
829 va_copy(ap2, ap);
830 fprintf(stderr, "qemu: fatal: ");
831 vfprintf(stderr, fmt, ap);
832 fprintf(stderr, "\n");
833 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
834 if (qemu_log_enabled()) {
835 qemu_log("qemu: fatal: ");
836 qemu_log_vprintf(fmt, ap2);
837 qemu_log("\n");
838 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
839 qemu_log_flush();
840 qemu_log_close();
841 }
842 va_end(ap2);
843 va_end(ap);
844 #if defined(CONFIG_USER_ONLY)
845 {
846 struct sigaction act;
847 sigfillset(&act.sa_mask);
848 act.sa_handler = SIG_DFL;
849 sigaction(SIGABRT, &act, NULL);
850 }
851 #endif
852 abort();
853 }
854
855 #if !defined(CONFIG_USER_ONLY)
856 /* Called from RCU critical section */
857 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
858 {
859 RAMBlock *block;
860
861 block = atomic_rcu_read(&ram_list.mru_block);
862 if (block && addr - block->offset < block->max_length) {
863 goto found;
864 }
865 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
866 if (addr - block->offset < block->max_length) {
867 goto found;
868 }
869 }
870
871 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
872 abort();
873
874 found:
875 /* It is safe to write mru_block outside the iothread lock. This
876 * is what happens:
877 *
878 * mru_block = xxx
879 * rcu_read_unlock()
880 * xxx removed from list
881 * rcu_read_lock()
882 * read mru_block
883 * mru_block = NULL;
884 * call_rcu(reclaim_ramblock, xxx);
885 * rcu_read_unlock()
886 *
887 * atomic_rcu_set is not needed here. The block was already published
888 * when it was placed into the list. Here we're just making an extra
889 * copy of the pointer.
890 */
891 ram_list.mru_block = block;
892 return block;
893 }
894
895 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
896 {
897 ram_addr_t start1;
898 RAMBlock *block;
899 ram_addr_t end;
900
901 end = TARGET_PAGE_ALIGN(start + length);
902 start &= TARGET_PAGE_MASK;
903
904 rcu_read_lock();
905 block = qemu_get_ram_block(start);
906 assert(block == qemu_get_ram_block(end - 1));
907 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
908 cpu_tlb_reset_dirty_all(start1, length);
909 rcu_read_unlock();
910 }
911
912 /* Note: start and end must be within the same ram block. */
913 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
914 ram_addr_t length,
915 unsigned client)
916 {
917 unsigned long end, page;
918 bool dirty;
919
920 if (length == 0) {
921 return false;
922 }
923
924 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
925 page = start >> TARGET_PAGE_BITS;
926 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
927 page, end - page);
928
929 if (dirty && tcg_enabled()) {
930 tlb_reset_dirty_range_all(start, length);
931 }
932
933 return dirty;
934 }
935
936 /* Called from RCU critical section */
937 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
938 MemoryRegionSection *section,
939 target_ulong vaddr,
940 hwaddr paddr, hwaddr xlat,
941 int prot,
942 target_ulong *address)
943 {
944 hwaddr iotlb;
945 CPUWatchpoint *wp;
946
947 if (memory_region_is_ram(section->mr)) {
948 /* Normal RAM. */
949 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
950 + xlat;
951 if (!section->readonly) {
952 iotlb |= PHYS_SECTION_NOTDIRTY;
953 } else {
954 iotlb |= PHYS_SECTION_ROM;
955 }
956 } else {
957 iotlb = section - section->address_space->dispatch->map.sections;
958 iotlb += xlat;
959 }
960
961 /* Make accesses to pages with watchpoints go via the
962 watchpoint trap routines. */
963 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
964 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
965 /* Avoid trapping reads of pages with a write breakpoint. */
966 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
967 iotlb = PHYS_SECTION_WATCH + paddr;
968 *address |= TLB_MMIO;
969 break;
970 }
971 }
972 }
973
974 return iotlb;
975 }
976 #endif /* defined(CONFIG_USER_ONLY) */
977
978 #if !defined(CONFIG_USER_ONLY)
979
980 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
981 uint16_t section);
982 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
983
984 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
985 qemu_anon_ram_alloc;
986
987 /*
988 * Set a custom physical guest memory allocator.
989 * Accelerators with unusual needs may need this. Hopefully, we can
990 * get rid of it eventually.
991 */
992 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
993 {
994 phys_mem_alloc = alloc;
995 }
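/* Minimal sketch of how an accelerator might use this hook; the names below
 * are illustrative only, not existing QEMU symbols:
 *
 *     static void *my_accel_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         return my_accel_pool_alloc(size, align);   // accelerator's own pool
 *     }
 *
 *     // during accelerator init, before any RAM block is created:
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 */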
996
997 static uint16_t phys_section_add(PhysPageMap *map,
998 MemoryRegionSection *section)
999 {
1000 /* The physical section number is ORed with a page-aligned
1001 * pointer to produce the iotlb entries. Thus it should
1002 * never overflow into the page-aligned value.
1003 */
1004 assert(map->sections_nb < TARGET_PAGE_SIZE);
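    /* E.g. with 4 KiB pages a section number always fits in bits [11:0],
     * so ORing it into a page-aligned iotlb value cannot corrupt it. */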
1005
1006 if (map->sections_nb == map->sections_nb_alloc) {
1007 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1008 map->sections = g_renew(MemoryRegionSection, map->sections,
1009 map->sections_nb_alloc);
1010 }
1011 map->sections[map->sections_nb] = *section;
1012 memory_region_ref(section->mr);
1013 return map->sections_nb++;
1014 }
1015
1016 static void phys_section_destroy(MemoryRegion *mr)
1017 {
1018 memory_region_unref(mr);
1019
1020 if (mr->subpage) {
1021 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1022 object_unref(OBJECT(&subpage->iomem));
1023 g_free(subpage);
1024 }
1025 }
1026
1027 static void phys_sections_free(PhysPageMap *map)
1028 {
1029 while (map->sections_nb > 0) {
1030 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1031 phys_section_destroy(section->mr);
1032 }
1033 g_free(map->sections);
1034 g_free(map->nodes);
1035 }
1036
1037 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1038 {
1039 subpage_t *subpage;
1040 hwaddr base = section->offset_within_address_space
1041 & TARGET_PAGE_MASK;
1042 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1043 d->map.nodes, d->map.sections);
1044 MemoryRegionSection subsection = {
1045 .offset_within_address_space = base,
1046 .size = int128_make64(TARGET_PAGE_SIZE),
1047 };
1048 hwaddr start, end;
1049
1050 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1051
1052 if (!(existing->mr->subpage)) {
1053 subpage = subpage_init(d->as, base);
1054 subsection.address_space = d->as;
1055 subsection.mr = &subpage->iomem;
1056 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1057 phys_section_add(&d->map, &subsection));
1058 } else {
1059 subpage = container_of(existing->mr, subpage_t, iomem);
1060 }
1061 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1062 end = start + int128_get64(section->size) - 1;
1063 subpage_register(subpage, start, end,
1064 phys_section_add(&d->map, section));
1065 }
1066
1067
1068 static void register_multipage(AddressSpaceDispatch *d,
1069 MemoryRegionSection *section)
1070 {
1071 hwaddr start_addr = section->offset_within_address_space;
1072 uint16_t section_index = phys_section_add(&d->map, section);
1073 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1074 TARGET_PAGE_BITS));
1075
1076 assert(num_pages);
1077 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1078 }
1079
1080 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1081 {
1082 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1083 AddressSpaceDispatch *d = as->next_dispatch;
1084 MemoryRegionSection now = *section, remain = *section;
1085 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1086
1087 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1088 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1089 - now.offset_within_address_space;
1090
1091 now.size = int128_min(int128_make64(left), now.size);
1092 register_subpage(d, &now);
1093 } else {
1094 now.size = int128_zero();
1095 }
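    /* Walk the rest of the section: pieces smaller than a page, and single
     * pages at unaligned offsets, are registered as subpages; page-aligned
     * runs go through register_multipage(). */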
1096 while (int128_ne(remain.size, now.size)) {
1097 remain.size = int128_sub(remain.size, now.size);
1098 remain.offset_within_address_space += int128_get64(now.size);
1099 remain.offset_within_region += int128_get64(now.size);
1100 now = remain;
1101 if (int128_lt(remain.size, page_size)) {
1102 register_subpage(d, &now);
1103 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1104 now.size = page_size;
1105 register_subpage(d, &now);
1106 } else {
1107 now.size = int128_and(now.size, int128_neg(page_size));
1108 register_multipage(d, &now);
1109 }
1110 }
1111 }
1112
1113 void qemu_flush_coalesced_mmio_buffer(void)
1114 {
1115 if (kvm_enabled())
1116 kvm_flush_coalesced_mmio_buffer();
1117 }
1118
1119 void qemu_mutex_lock_ramlist(void)
1120 {
1121 qemu_mutex_lock(&ram_list.mutex);
1122 }
1123
1124 void qemu_mutex_unlock_ramlist(void)
1125 {
1126 qemu_mutex_unlock(&ram_list.mutex);
1127 }
1128
1129 #ifdef __linux__
1130
1131 #include <sys/vfs.h>
1132
1133 #define HUGETLBFS_MAGIC 0x958458f6
1134
1135 static long gethugepagesize(const char *path, Error **errp)
1136 {
1137 struct statfs fs;
1138 int ret;
1139
1140 do {
1141 ret = statfs(path, &fs);
1142 } while (ret != 0 && errno == EINTR);
1143
1144 if (ret != 0) {
1145 error_setg_errno(errp, errno, "failed to get page size of file %s",
1146 path);
1147 return 0;
1148 }
1149
1150 if (fs.f_type != HUGETLBFS_MAGIC)
1151 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1152
1153 return fs.f_bsize;
1154 }
1155
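/* Allocate the backing store for a RAM block from a file on a hugetlbfs (or
 * other) mount; typically reached via the -mem-path option or a file-backed
 * memory backend.
 */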
1156 static void *file_ram_alloc(RAMBlock *block,
1157 ram_addr_t memory,
1158 const char *path,
1159 Error **errp)
1160 {
1161 char *filename;
1162 char *sanitized_name;
1163 char *c;
1164 void *area = NULL;
1165 int fd;
1166 uint64_t hpagesize;
1167 Error *local_err = NULL;
1168
1169 hpagesize = gethugepagesize(path, &local_err);
1170 if (local_err) {
1171 error_propagate(errp, local_err);
1172 goto error;
1173 }
1174 block->mr->align = hpagesize;
1175
1176 if (memory < hpagesize) {
1177 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1178 "or larger than huge page size 0x%" PRIx64,
1179 memory, hpagesize);
1180 goto error;
1181 }
1182
1183 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1184 error_setg(errp,
1185 "host lacks kvm mmu notifiers, -mem-path unsupported");
1186 goto error;
1187 }
1188
1189 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1190 sanitized_name = g_strdup(memory_region_name(block->mr));
1191 for (c = sanitized_name; *c != '\0'; c++) {
1192 if (*c == '/')
1193 *c = '_';
1194 }
1195
1196 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1197 sanitized_name);
1198 g_free(sanitized_name);
1199
1200 fd = mkstemp(filename);
1201 if (fd < 0) {
1202 error_setg_errno(errp, errno,
1203 "unable to create backing store for hugepages");
1204 g_free(filename);
1205 goto error;
1206 }
1207 unlink(filename);
1208 g_free(filename);
1209
1210 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1211
1212 /*
1213 * ftruncate is not supported by hugetlbfs in older
1214 * hosts, so don't bother bailing out on errors.
1215 * If anything goes wrong with it under other filesystems,
1216 * mmap will fail.
1217 */
1218 if (ftruncate(fd, memory)) {
1219 perror("ftruncate");
1220 }
1221
1222 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1223 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1224 fd, 0);
1225 if (area == MAP_FAILED) {
1226 error_setg_errno(errp, errno,
1227 "unable to map backing store for hugepages");
1228 close(fd);
1229 goto error;
1230 }
1231
1232 if (mem_prealloc) {
1233 os_mem_prealloc(fd, area, memory);
1234 }
1235
1236 block->fd = fd;
1237 return area;
1238
1239 error:
1240 if (mem_prealloc) {
1241 error_report("%s", error_get_pretty(*errp));
1242 exit(1);
1243 }
1244 return NULL;
1245 }
1246 #endif
1247
1248 /* Called with the ramlist lock held. */
1249 static ram_addr_t find_ram_offset(ram_addr_t size)
1250 {
1251 RAMBlock *block, *next_block;
1252 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1253
1254 assert(size != 0); /* it would hand out same offset multiple times */
1255
1256 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1257 return 0;
1258 }
1259
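    /* Best-fit search: for every block, measure the gap up to the next
     * block above it and remember the smallest gap that still fits. */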
1260 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1261 ram_addr_t end, next = RAM_ADDR_MAX;
1262
1263 end = block->offset + block->max_length;
1264
1265 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1266 if (next_block->offset >= end) {
1267 next = MIN(next, next_block->offset);
1268 }
1269 }
1270 if (next - end >= size && next - end < mingap) {
1271 offset = end;
1272 mingap = next - end;
1273 }
1274 }
1275
1276 if (offset == RAM_ADDR_MAX) {
1277 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1278 (uint64_t)size);
1279 abort();
1280 }
1281
1282 return offset;
1283 }
1284
1285 ram_addr_t last_ram_offset(void)
1286 {
1287 RAMBlock *block;
1288 ram_addr_t last = 0;
1289
1290 rcu_read_lock();
1291 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1292 last = MAX(last, block->offset + block->max_length);
1293 }
1294 rcu_read_unlock();
1295 return last;
1296 }
1297
1298 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1299 {
1300 int ret;
1301
1302 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1303 if (!machine_dump_guest_core(current_machine)) {
1304 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1305 if (ret) {
1306 perror("qemu_madvise");
1307 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1308 "but dump_guest_core=off specified\n");
1309 }
1310 }
1311 }
1312
1313 /* Called within an RCU critical section, or while the ramlist lock
1314 * is held.
1315 */
1316 static RAMBlock *find_ram_block(ram_addr_t addr)
1317 {
1318 RAMBlock *block;
1319
1320 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1321 if (block->offset == addr) {
1322 return block;
1323 }
1324 }
1325
1326 return NULL;
1327 }
1328
1329 /* Called with iothread lock held. */
1330 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1331 {
1332 RAMBlock *new_block, *block;
1333
1334 rcu_read_lock();
1335 new_block = find_ram_block(addr);
1336 assert(new_block);
1337 assert(!new_block->idstr[0]);
1338
1339 if (dev) {
1340 char *id = qdev_get_dev_path(dev);
1341 if (id) {
1342 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1343 g_free(id);
1344 }
1345 }
1346 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1347
1348 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1349 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1350 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1351 new_block->idstr);
1352 abort();
1353 }
1354 }
1355 rcu_read_unlock();
1356 }
1357
1358 /* Called with iothread lock held. */
1359 void qemu_ram_unset_idstr(ram_addr_t addr)
1360 {
1361 RAMBlock *block;
1362
1363 /* FIXME: arch_init.c assumes that this is not called throughout
1364 * migration. Ignore the problem since hot-unplug during migration
1365 * does not work anyway.
1366 */
1367
1368 rcu_read_lock();
1369 block = find_ram_block(addr);
1370 if (block) {
1371 memset(block->idstr, 0, sizeof(block->idstr));
1372 }
1373 rcu_read_unlock();
1374 }
1375
1376 static int memory_try_enable_merging(void *addr, size_t len)
1377 {
1378 if (!machine_mem_merge(current_machine)) {
1379 /* disabled by the user */
1380 return 0;
1381 }
1382
1383 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1384 }
1385
1386 /* Only legal before guest might have detected the memory size: e.g. on
1387 * incoming migration, or right after reset.
1388 *
1389 * As the memory core doesn't know how the memory is accessed, it is up to
1390 * the resize callback to update device state and/or add assertions to detect
1391 * misuse, if necessary.
1392 */
1393 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1394 {
1395 RAMBlock *block = find_ram_block(base);
1396
1397 assert(block);
1398
1399 newsize = TARGET_PAGE_ALIGN(newsize);
1400
1401 if (block->used_length == newsize) {
1402 return 0;
1403 }
1404
1405 if (!(block->flags & RAM_RESIZEABLE)) {
1406 error_setg_errno(errp, EINVAL,
1407 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1408 " in != 0x" RAM_ADDR_FMT, block->idstr,
1409 newsize, block->used_length);
1410 return -EINVAL;
1411 }
1412
1413 if (block->max_length < newsize) {
1414 error_setg_errno(errp, EINVAL,
1415 "Length too large: %s: 0x" RAM_ADDR_FMT
1416 " > 0x" RAM_ADDR_FMT, block->idstr,
1417 newsize, block->max_length);
1418 return -EINVAL;
1419 }
1420
1421 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1422 block->used_length = newsize;
1423 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1424 DIRTY_CLIENTS_ALL);
1425 memory_region_set_size(block->mr, newsize);
1426 if (block->resized) {
1427 block->resized(block->idstr, newsize, block->host);
1428 }
1429 return 0;
1430 }
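/* Typical pairing, as a sketch (my_resized_cb, mr, used, max, new_size and
 * err are illustrative names, not existing QEMU symbols):
 *
 *     addr = qemu_ram_alloc_resizeable(used, max, my_resized_cb, mr, &err);
 *     ...
 *     qemu_ram_resize(addr, new_size, &err);
 */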
1431
1432 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1433 {
1434 RAMBlock *block;
1435 RAMBlock *last_block = NULL;
1436 ram_addr_t old_ram_size, new_ram_size;
1437
1438 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1439
1440 qemu_mutex_lock_ramlist();
1441 new_block->offset = find_ram_offset(new_block->max_length);
1442
1443 if (!new_block->host) {
1444 if (xen_enabled()) {
1445 xen_ram_alloc(new_block->offset, new_block->max_length,
1446 new_block->mr);
1447 } else {
1448 new_block->host = phys_mem_alloc(new_block->max_length,
1449 &new_block->mr->align);
1450 if (!new_block->host) {
1451 error_setg_errno(errp, errno,
1452 "cannot set up guest memory '%s'",
1453 memory_region_name(new_block->mr));
1454 qemu_mutex_unlock_ramlist();
1455 return -1;
1456 }
1457 memory_try_enable_merging(new_block->host, new_block->max_length);
1458 }
1459 }
1460
1461 new_ram_size = MAX(old_ram_size,
1462 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1463 if (new_ram_size > old_ram_size) {
1464 migration_bitmap_extend(old_ram_size, new_ram_size);
1465 }
1466 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1467 * QLIST (which has an RCU-friendly variant) does not have insertion at
1468 * tail, so save the last element in last_block.
1469 */
1470 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1471 last_block = block;
1472 if (block->max_length < new_block->max_length) {
1473 break;
1474 }
1475 }
1476 if (block) {
1477 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1478 } else if (last_block) {
1479 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1480 } else { /* list is empty */
1481 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1482 }
1483 ram_list.mru_block = NULL;
1484
1485 /* Write list before version */
1486 smp_wmb();
1487 ram_list.version++;
1488 qemu_mutex_unlock_ramlist();
1489
1490 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1491
1492 if (new_ram_size > old_ram_size) {
1493 int i;
1494
1495 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1496 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1497 ram_list.dirty_memory[i] =
1498 bitmap_zero_extend(ram_list.dirty_memory[i],
1499 old_ram_size, new_ram_size);
1500 }
1501 }
1502 cpu_physical_memory_set_dirty_range(new_block->offset,
1503 new_block->used_length,
1504 DIRTY_CLIENTS_ALL);
1505
1506 if (new_block->host) {
1507 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1508 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1509 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1510 if (kvm_enabled()) {
1511 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1512 }
1513 }
1514
1515 return new_block->offset;
1516 }
1517
1518 #ifdef __linux__
1519 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1520 bool share, const char *mem_path,
1521 Error **errp)
1522 {
1523 RAMBlock *new_block;
1524 ram_addr_t addr;
1525 Error *local_err = NULL;
1526
1527 if (xen_enabled()) {
1528 error_setg(errp, "-mem-path not supported with Xen");
1529 return -1;
1530 }
1531
1532 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1533 /*
1534 * file_ram_alloc() needs to allocate just like
1535 * phys_mem_alloc, but we haven't bothered to provide
1536 * a hook there.
1537 */
1538 error_setg(errp,
1539 "-mem-path not supported with this accelerator");
1540 return -1;
1541 }
1542
1543 size = TARGET_PAGE_ALIGN(size);
1544 new_block = g_malloc0(sizeof(*new_block));
1545 new_block->mr = mr;
1546 new_block->used_length = size;
1547 new_block->max_length = size;
1548 new_block->flags = share ? RAM_SHARED : 0;
1549 new_block->host = file_ram_alloc(new_block, size,
1550 mem_path, errp);
1551 if (!new_block->host) {
1552 g_free(new_block);
1553 return -1;
1554 }
1555
1556 addr = ram_block_add(new_block, &local_err);
1557 if (local_err) {
1558 g_free(new_block);
1559 error_propagate(errp, local_err);
1560 return -1;
1561 }
1562 return addr;
1563 }
1564 #endif
1565
1566 static
1567 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1568 void (*resized)(const char*,
1569 uint64_t length,
1570 void *host),
1571 void *host, bool resizeable,
1572 MemoryRegion *mr, Error **errp)
1573 {
1574 RAMBlock *new_block;
1575 ram_addr_t addr;
1576 Error *local_err = NULL;
1577
1578 size = TARGET_PAGE_ALIGN(size);
1579 max_size = TARGET_PAGE_ALIGN(max_size);
1580 new_block = g_malloc0(sizeof(*new_block));
1581 new_block->mr = mr;
1582 new_block->resized = resized;
1583 new_block->used_length = size;
1584 new_block->max_length = max_size;
1585 assert(max_size >= size);
1586 new_block->fd = -1;
1587 new_block->host = host;
1588 if (host) {
1589 new_block->flags |= RAM_PREALLOC;
1590 }
1591 if (resizeable) {
1592 new_block->flags |= RAM_RESIZEABLE;
1593 }
1594 addr = ram_block_add(new_block, &local_err);
1595 if (local_err) {
1596 g_free(new_block);
1597 error_propagate(errp, local_err);
1598 return -1;
1599 }
1600 return addr;
1601 }
1602
1603 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1604 MemoryRegion *mr, Error **errp)
1605 {
1606 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1607 }
1608
1609 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1610 {
1611 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1612 }
1613
1614 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1615 void (*resized)(const char*,
1616 uint64_t length,
1617 void *host),
1618 MemoryRegion *mr, Error **errp)
1619 {
1620 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1621 }
1622
1623 void qemu_ram_free_from_ptr(ram_addr_t addr)
1624 {
1625 RAMBlock *block;
1626
1627 qemu_mutex_lock_ramlist();
1628 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1629 if (addr == block->offset) {
1630 QLIST_REMOVE_RCU(block, next);
1631 ram_list.mru_block = NULL;
1632 /* Write list before version */
1633 smp_wmb();
1634 ram_list.version++;
1635 g_free_rcu(block, rcu);
1636 break;
1637 }
1638 }
1639 qemu_mutex_unlock_ramlist();
1640 }
1641
1642 static void reclaim_ramblock(RAMBlock *block)
1643 {
1644 if (block->flags & RAM_PREALLOC) {
1645 ;
1646 } else if (xen_enabled()) {
1647 xen_invalidate_map_cache_entry(block->host);
1648 #ifndef _WIN32
1649 } else if (block->fd >= 0) {
1650 munmap(block->host, block->max_length);
1651 close(block->fd);
1652 #endif
1653 } else {
1654 qemu_anon_ram_free(block->host, block->max_length);
1655 }
1656 g_free(block);
1657 }
1658
1659 void qemu_ram_free(ram_addr_t addr)
1660 {
1661 RAMBlock *block;
1662
1663 qemu_mutex_lock_ramlist();
1664 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1665 if (addr == block->offset) {
1666 QLIST_REMOVE_RCU(block, next);
1667 ram_list.mru_block = NULL;
1668 /* Write list before version */
1669 smp_wmb();
1670 ram_list.version++;
1671 call_rcu(block, reclaim_ramblock, rcu);
1672 break;
1673 }
1674 }
1675 qemu_mutex_unlock_ramlist();
1676 }
1677
1678 #ifndef _WIN32
1679 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1680 {
1681 RAMBlock *block;
1682 ram_addr_t offset;
1683 int flags;
1684 void *area, *vaddr;
1685
1686 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1687 offset = addr - block->offset;
1688 if (offset < block->max_length) {
1689 vaddr = ramblock_ptr(block, offset);
1690 if (block->flags & RAM_PREALLOC) {
1691 ;
1692 } else if (xen_enabled()) {
1693 abort();
1694 } else {
1695 flags = MAP_FIXED;
1696 if (block->fd >= 0) {
1697 flags |= (block->flags & RAM_SHARED ?
1698 MAP_SHARED : MAP_PRIVATE);
1699 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1700 flags, block->fd, offset);
1701 } else {
1702 /*
1703 * Remap needs to match alloc. Accelerators that
1704 * set phys_mem_alloc never remap. If they did,
1705 * we'd need a remap hook here.
1706 */
1707 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1708
1709 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1710 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1711 flags, -1, 0);
1712 }
1713 if (area != vaddr) {
1714 fprintf(stderr, "Could not remap addr: "
1715 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1716 length, addr);
1717 exit(1);
1718 }
1719 memory_try_enable_merging(vaddr, length);
1720 qemu_ram_setup_dump(vaddr, length);
1721 }
1722 }
1723 }
1724 }
1725 #endif /* !_WIN32 */
1726
1727 int qemu_get_ram_fd(ram_addr_t addr)
1728 {
1729 RAMBlock *block;
1730 int fd;
1731
1732 rcu_read_lock();
1733 block = qemu_get_ram_block(addr);
1734 fd = block->fd;
1735 rcu_read_unlock();
1736 return fd;
1737 }
1738
1739 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1740 {
1741 RAMBlock *block;
1742 void *ptr;
1743
1744 rcu_read_lock();
1745 block = qemu_get_ram_block(addr);
1746 ptr = ramblock_ptr(block, 0);
1747 rcu_read_unlock();
1748 return ptr;
1749 }
1750
1751 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1752 * This should not be used for general purpose DMA. Use address_space_map
1753 * or address_space_rw instead. For local memory (e.g. video ram) that the
1754 * device owns, use memory_region_get_ram_ptr.
1755 *
1756 * By the time this function returns, the returned pointer is not protected
1757 * by RCU anymore. If the caller is not within an RCU critical section and
1758 * does not hold the iothread lock, it must have other means of protecting the
1759 * pointer, such as a reference to the region that includes the incoming
1760 * ram_addr_t.
1761 */
1762 void *qemu_get_ram_ptr(ram_addr_t addr)
1763 {
1764 RAMBlock *block;
1765 void *ptr;
1766
1767 rcu_read_lock();
1768 block = qemu_get_ram_block(addr);
1769
1770 if (xen_enabled() && block->host == NULL) {
1771 /* We need to check if the requested address is in the RAM
1772 * because we don't want to map the entire memory in QEMU.
1773 * In that case just map until the end of the page.
1774 */
1775 if (block->offset == 0) {
1776 ptr = xen_map_cache(addr, 0, 0);
1777 goto unlock;
1778 }
1779
1780 block->host = xen_map_cache(block->offset, block->max_length, 1);
1781 }
1782 ptr = ramblock_ptr(block, addr - block->offset);
1783
1784 unlock:
1785 rcu_read_unlock();
1786 return ptr;
1787 }
1788
1789 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1790 * but takes a size argument.
1791 *
1792 * By the time this function returns, the returned pointer is not protected
1793 * by RCU anymore. If the caller is not within an RCU critical section and
1794 * does not hold the iothread lock, it must have other means of protecting the
1795 * pointer, such as a reference to the region that includes the incoming
1796 * ram_addr_t.
1797 */
1798 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1799 {
1800 void *ptr;
1801 if (*size == 0) {
1802 return NULL;
1803 }
1804 if (xen_enabled()) {
1805 return xen_map_cache(addr, *size, 1);
1806 } else {
1807 RAMBlock *block;
1808 rcu_read_lock();
1809 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1810 if (addr - block->offset < block->max_length) {
1811 if (addr - block->offset + *size > block->max_length)
1812 *size = block->max_length - addr + block->offset;
1813 ptr = ramblock_ptr(block, addr - block->offset);
1814 rcu_read_unlock();
1815 return ptr;
1816 }
1817 }
1818
1819 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1820 abort();
1821 }
1822 }
1823
1824 /* Some of the softmmu routines need to translate from a host pointer
1825 * (typically a TLB entry) back to a ram offset.
1826 *
1827 * By the time this function returns, the returned pointer is not protected
1828 * by RCU anymore. If the caller is not within an RCU critical section and
1829 * does not hold the iothread lock, it must have other means of protecting the
1830 * pointer, such as a reference to the region that includes the incoming
1831 * ram_addr_t.
1832 */
1833 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1834 {
1835 RAMBlock *block;
1836 uint8_t *host = ptr;
1837 MemoryRegion *mr;
1838
1839 if (xen_enabled()) {
1840 rcu_read_lock();
1841 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1842 mr = qemu_get_ram_block(*ram_addr)->mr;
1843 rcu_read_unlock();
1844 return mr;
1845 }
1846
1847 rcu_read_lock();
1848 block = atomic_rcu_read(&ram_list.mru_block);
1849 if (block && block->host && host - block->host < block->max_length) {
1850 goto found;
1851 }
1852
1853 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1854 /* This case happens when the block is not mapped. */
1855 if (block->host == NULL) {
1856 continue;
1857 }
1858 if (host - block->host < block->max_length) {
1859 goto found;
1860 }
1861 }
1862
1863 rcu_read_unlock();
1864 return NULL;
1865
1866 found:
1867 *ram_addr = block->offset + (host - block->host);
1868 mr = block->mr;
1869 rcu_read_unlock();
1870 return mr;
1871 }
1872
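/* Write handler for RAM pages that still contain translated code: invalidate
 * any TBs covering the written bytes, perform the store on the RAM backing,
 * mark the page dirty and, once the page is no longer clean, let
 * tlb_set_dirty() restore a fast-path TLB entry for it.
 */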
1873 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1874 uint64_t val, unsigned size)
1875 {
1876 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1877 tb_invalidate_phys_page_fast(ram_addr, size);
1878 }
1879 switch (size) {
1880 case 1:
1881 stb_p(qemu_get_ram_ptr(ram_addr), val);
1882 break;
1883 case 2:
1884 stw_p(qemu_get_ram_ptr(ram_addr), val);
1885 break;
1886 case 4:
1887 stl_p(qemu_get_ram_ptr(ram_addr), val);
1888 break;
1889 default:
1890 abort();
1891 }
1892 /* Set both VGA and migration bits for simplicity and to remove
1893 * the notdirty callback faster.
1894 */
1895 cpu_physical_memory_set_dirty_range(ram_addr, size,
1896 DIRTY_CLIENTS_NOCODE);
1897 /* we remove the notdirty callback only if the code has been
1898 flushed */
1899 if (!cpu_physical_memory_is_clean(ram_addr)) {
1900 CPUArchState *env = current_cpu->env_ptr;
1901 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1902 }
1903 }
1904
1905 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1906 unsigned size, bool is_write)
1907 {
1908 return is_write;
1909 }
1910
1911 static const MemoryRegionOps notdirty_mem_ops = {
1912 .write = notdirty_mem_write,
1913 .valid.accepts = notdirty_mem_accepts,
1914 .endianness = DEVICE_NATIVE_ENDIAN,
1915 };
1916
1917 /* Generate a debug exception if a watchpoint has been hit. */
1918 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
1919 {
1920 CPUState *cpu = current_cpu;
1921 CPUArchState *env = cpu->env_ptr;
1922 target_ulong pc, cs_base;
1923 target_ulong vaddr;
1924 CPUWatchpoint *wp;
1925 int cpu_flags;
1926
1927 if (cpu->watchpoint_hit) {
1928 /* We re-entered the check after replacing the TB. Now raise
1929 * the debug interrupt so that it will trigger after the
1930 * current instruction. */
1931 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1932 return;
1933 }
1934 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1935 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1936 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1937 && (wp->flags & flags)) {
1938 if (flags == BP_MEM_READ) {
1939 wp->flags |= BP_WATCHPOINT_HIT_READ;
1940 } else {
1941 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1942 }
1943 wp->hitaddr = vaddr;
1944 wp->hitattrs = attrs;
1945 if (!cpu->watchpoint_hit) {
1946 cpu->watchpoint_hit = wp;
1947 tb_check_watchpoint(cpu);
1948 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1949 cpu->exception_index = EXCP_DEBUG;
1950 cpu_loop_exit(cpu);
1951 } else {
1952 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1953 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1954 cpu_resume_from_signal(cpu, NULL);
1955 }
1956 }
1957 } else {
1958 wp->flags &= ~BP_WATCHPOINT_HIT;
1959 }
1960 }
1961 }
1962
1963 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1964 so these check for a hit then pass through to the normal out-of-line
1965 phys routines. */
1966 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1967 unsigned size, MemTxAttrs attrs)
1968 {
1969 MemTxResult res;
1970 uint64_t data;
1971
1972 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1973 switch (size) {
1974 case 1:
1975 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1976 break;
1977 case 2:
1978 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1979 break;
1980 case 4:
1981 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1982 break;
1983 default: abort();
1984 }
1985 *pdata = data;
1986 return res;
1987 }
1988
1989 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1990 uint64_t val, unsigned size,
1991 MemTxAttrs attrs)
1992 {
1993 MemTxResult res;
1994
1995 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1996 switch (size) {
1997 case 1:
1998 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1999 break;
2000 case 2:
2001 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2002 break;
2003 case 4:
2004 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2005 break;
2006 default: abort();
2007 }
2008 return res;
2009 }
2010
2011 static const MemoryRegionOps watch_mem_ops = {
2012 .read_with_attrs = watch_mem_read,
2013 .write_with_attrs = watch_mem_write,
2014 .endianness = DEVICE_NATIVE_ENDIAN,
2015 };
2016
2017 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2018 unsigned len, MemTxAttrs attrs)
2019 {
2020 subpage_t *subpage = opaque;
2021 uint8_t buf[8];
2022 MemTxResult res;
2023
2024 #if defined(DEBUG_SUBPAGE)
2025 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2026 subpage, len, addr);
2027 #endif
2028 res = address_space_read(subpage->as, addr + subpage->base,
2029 attrs, buf, len);
2030 if (res) {
2031 return res;
2032 }
2033 switch (len) {
2034 case 1:
2035 *data = ldub_p(buf);
2036 return MEMTX_OK;
2037 case 2:
2038 *data = lduw_p(buf);
2039 return MEMTX_OK;
2040 case 4:
2041 *data = ldl_p(buf);
2042 return MEMTX_OK;
2043 case 8:
2044 *data = ldq_p(buf);
2045 return MEMTX_OK;
2046 default:
2047 abort();
2048 }
2049 }
2050
2051 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2052 uint64_t value, unsigned len, MemTxAttrs attrs)
2053 {
2054 subpage_t *subpage = opaque;
2055 uint8_t buf[8];
2056
2057 #if defined(DEBUG_SUBPAGE)
2058 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2059 " value %"PRIx64"\n",
2060 __func__, subpage, len, addr, value);
2061 #endif
2062 switch (len) {
2063 case 1:
2064 stb_p(buf, value);
2065 break;
2066 case 2:
2067 stw_p(buf, value);
2068 break;
2069 case 4:
2070 stl_p(buf, value);
2071 break;
2072 case 8:
2073 stq_p(buf, value);
2074 break;
2075 default:
2076 abort();
2077 }
2078 return address_space_write(subpage->as, addr + subpage->base,
2079 attrs, buf, len);
2080 }
2081
2082 static bool subpage_accepts(void *opaque, hwaddr addr,
2083 unsigned len, bool is_write)
2084 {
2085 subpage_t *subpage = opaque;
2086 #if defined(DEBUG_SUBPAGE)
2087 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2088 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2089 #endif
2090
2091 return address_space_access_valid(subpage->as, addr + subpage->base,
2092 len, is_write);
2093 }
2094
2095 static const MemoryRegionOps subpage_ops = {
2096 .read_with_attrs = subpage_read,
2097 .write_with_attrs = subpage_write,
2098 .impl.min_access_size = 1,
2099 .impl.max_access_size = 8,
2100 .valid.min_access_size = 1,
2101 .valid.max_access_size = 8,
2102 .valid.accepts = subpage_accepts,
2103 .endianness = DEVICE_NATIVE_ENDIAN,
2104 };
2105
2106 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2107 uint16_t section)
2108 {
2109 int idx, eidx;
2110
2111 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2112 return -1;
2113 idx = SUBPAGE_IDX(start);
2114 eidx = SUBPAGE_IDX(end);
2115 #if defined(DEBUG_SUBPAGE)
2116 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2117 __func__, mmio, start, end, idx, eidx, section);
2118 #endif
2119 for (; idx <= eidx; idx++) {
2120 mmio->sub_section[idx] = section;
2121 }
2122
2123 return 0;
2124 }
2125
2126 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2127 {
2128 subpage_t *mmio;
2129
2130 mmio = g_malloc0(sizeof(subpage_t));
2131
2132 mmio->as = as;
2133 mmio->base = base;
2134 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2135 NULL, TARGET_PAGE_SIZE);
2136 mmio->iomem.subpage = true;
2137 #if defined(DEBUG_SUBPAGE)
2138 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2139 mmio, base, TARGET_PAGE_SIZE);
2140 #endif
2141 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2142
2143 return mmio;
2144 }
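
/* Illustrative sketch (assumption, not in the original source): how the
 * dispatch code carves one guest page between two MemoryRegionSections with
 * the helpers above (the in-tree caller is register_subpage()).  The section
 * numbers 5 and 6 are made up.
 *
 *     subpage_t *sp = subpage_init(as, page_base);
 *     // bytes 0x000-0x0ff of the page belong to section 5,
 *     // bytes 0x100 up to the end of the page belong to section 6;
 *     // everything else stays PHYS_SECTION_UNASSIGNED.
 *     subpage_register(sp, 0x000, 0x0ff, 5);
 *     subpage_register(sp, 0x100, TARGET_PAGE_SIZE - 1, 6);
 *
 * subpage_read/write then forward each access to whatever owns that offset by
 * re-entering address_space_read/write on the owning AddressSpace at full
 * resolution.
 */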
2145
2146 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2147 MemoryRegion *mr)
2148 {
2149 assert(as);
2150 MemoryRegionSection section = {
2151 .address_space = as,
2152 .mr = mr,
2153 .offset_within_address_space = 0,
2154 .offset_within_region = 0,
2155 .size = int128_2_64(),
2156 };
2157
2158 return phys_section_add(map, &section);
2159 }
2160
2161 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2162 {
2163 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2164 MemoryRegionSection *sections = d->map.sections;
2165
2166 return sections[index & ~TARGET_PAGE_MASK].mr;
2167 }
2168
2169 static void io_mem_init(void)
2170 {
2171 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2172 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2173 NULL, UINT64_MAX);
2174 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2175 NULL, UINT64_MAX);
2176 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2177 NULL, UINT64_MAX);
2178 }
2179
2180 static void mem_begin(MemoryListener *listener)
2181 {
2182 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2183 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2184 uint16_t n;
2185
2186 n = dummy_section(&d->map, as, &io_mem_unassigned);
2187 assert(n == PHYS_SECTION_UNASSIGNED);
2188 n = dummy_section(&d->map, as, &io_mem_notdirty);
2189 assert(n == PHYS_SECTION_NOTDIRTY);
2190 n = dummy_section(&d->map, as, &io_mem_rom);
2191 assert(n == PHYS_SECTION_ROM);
2192 n = dummy_section(&d->map, as, &io_mem_watch);
2193 assert(n == PHYS_SECTION_WATCH);
2194
2195 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2196 d->as = as;
2197 as->next_dispatch = d;
2198 }
2199
2200 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2201 {
2202 phys_sections_free(&d->map);
2203 g_free(d);
2204 }
2205
2206 static void mem_commit(MemoryListener *listener)
2207 {
2208 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2209 AddressSpaceDispatch *cur = as->dispatch;
2210 AddressSpaceDispatch *next = as->next_dispatch;
2211
2212 phys_page_compact_all(next, next->map.nodes_nb);
2213
2214 atomic_rcu_set(&as->dispatch, next);
2215 if (cur) {
2216 call_rcu(cur, address_space_dispatch_free, rcu);
2217 }
2218 }
2219
2220 static void tcg_commit(MemoryListener *listener)
2221 {
2222 CPUState *cpu;
2223
2224 /* since each CPU stores ram addresses in its TLB cache, we must
2225 reset the modified entries */
2226     /* XXX: slow! */
2227 CPU_FOREACH(cpu) {
2228         /* FIXME: Disentangle the cpu.h circular header dependencies so we
2229            can get the right CPU directly from the listener. */
2230 if (cpu->tcg_as_listener != listener) {
2231 continue;
2232 }
2233 cpu_reload_memory_map(cpu);
2234 }
2235 }
2236
2237 void address_space_init_dispatch(AddressSpace *as)
2238 {
2239 as->dispatch = NULL;
2240 as->dispatch_listener = (MemoryListener) {
2241 .begin = mem_begin,
2242 .commit = mem_commit,
2243 .region_add = mem_add,
2244 .region_nop = mem_add,
2245 .priority = 0,
2246 };
2247 memory_listener_register(&as->dispatch_listener, as);
2248 }
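
/* Sketch of the lifecycle driven by the listener registered above (added here
 * for illustration; the flat-view rendering itself lives in memory.c).  On
 * every topology change the memory core replays the layout into this listener:
 *
 *     mem_begin()            -> allocate a fresh AddressSpaceDispatch
 *     mem_add() per section  -> populate d->phys_map
 *     mem_commit()           -> compact the tree, publish it with
 *                               atomic_rcu_set() and reclaim the old
 *                               dispatch via call_rcu()
 *
 * Readers such as address_space_translate() only dereference the dispatch
 * pointer under rcu_read_lock(), which is why the old tree can be freed
 * lazily instead of being torn down in place.
 */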
2249
2250 void address_space_unregister(AddressSpace *as)
2251 {
2252 memory_listener_unregister(&as->dispatch_listener);
2253 }
2254
2255 void address_space_destroy_dispatch(AddressSpace *as)
2256 {
2257 AddressSpaceDispatch *d = as->dispatch;
2258
2259 atomic_rcu_set(&as->dispatch, NULL);
2260 if (d) {
2261 call_rcu(d, address_space_dispatch_free, rcu);
2262 }
2263 }
2264
2265 static void memory_map_init(void)
2266 {
2267 system_memory = g_malloc(sizeof(*system_memory));
2268
2269 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2270 address_space_init(&address_space_memory, system_memory, "memory");
2271
2272 system_io = g_malloc(sizeof(*system_io));
2273 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2274 65536);
2275 address_space_init(&address_space_io, system_io, "I/O");
2276 }
2277
2278 MemoryRegion *get_system_memory(void)
2279 {
2280 return system_memory;
2281 }
2282
2283 MemoryRegion *get_system_io(void)
2284 {
2285 return system_io;
2286 }
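
/* Illustrative sketch (not part of the original file): how board code
 * typically hangs a device region off the root regions created above.
 * "my_dev_ops", "my_dev_state" and the 0xfe000000 base are hypothetical.
 *
 *     MemoryRegion *mmio = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init_io(mmio, NULL, &my_dev_ops, my_dev_state,
 *                           "my-dev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0xfe000000, mmio);
 *
 * The listener machinery above then regenerates address_space_memory's
 * dispatch tree so that accesses to 0xfe000000..0xfe000fff reach my_dev_ops.
 */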
2287
2288 #endif /* !defined(CONFIG_USER_ONLY) */
2289
2290 /* physical memory access (slow version, mainly for debug) */
2291 #if defined(CONFIG_USER_ONLY)
2292 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2293 uint8_t *buf, int len, int is_write)
2294 {
2295 int l, flags;
2296 target_ulong page;
2297 void * p;
2298
2299 while (len > 0) {
2300 page = addr & TARGET_PAGE_MASK;
2301 l = (page + TARGET_PAGE_SIZE) - addr;
2302 if (l > len)
2303 l = len;
2304 flags = page_get_flags(page);
2305 if (!(flags & PAGE_VALID))
2306 return -1;
2307 if (is_write) {
2308 if (!(flags & PAGE_WRITE))
2309 return -1;
2310 /* XXX: this code should not depend on lock_user */
2311 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2312 return -1;
2313 memcpy(p, buf, l);
2314 unlock_user(p, addr, l);
2315 } else {
2316 if (!(flags & PAGE_READ))
2317 return -1;
2318 /* XXX: this code should not depend on lock_user */
2319 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2320 return -1;
2321 memcpy(buf, p, l);
2322 unlock_user(p, addr, 0);
2323 }
2324 len -= l;
2325 buf += l;
2326 addr += l;
2327 }
2328 return 0;
2329 }
2330
2331 #else
2332
2333 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2334 hwaddr length)
2335 {
2336 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2337 /* No early return if dirty_log_mask is or becomes 0, because
2338 * cpu_physical_memory_set_dirty_range will still call
2339 * xen_modified_memory.
2340 */
2341 if (dirty_log_mask) {
2342 dirty_log_mask =
2343 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2344 }
2345 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2346 tb_invalidate_phys_range(addr, addr + length);
2347 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2348 }
2349 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2350 }
2351
2352 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2353 {
2354 unsigned access_size_max = mr->ops->valid.max_access_size;
2355
2356 /* Regions are assumed to support 1-4 byte accesses unless
2357 otherwise specified. */
2358 if (access_size_max == 0) {
2359 access_size_max = 4;
2360 }
2361
2362 /* Bound the maximum access by the alignment of the address. */
2363 if (!mr->ops->impl.unaligned) {
2364 unsigned align_size_max = addr & -addr;
2365 if (align_size_max != 0 && align_size_max < access_size_max) {
2366 access_size_max = align_size_max;
2367 }
2368 }
2369
2370 /* Don't attempt accesses larger than the maximum. */
2371 if (l > access_size_max) {
2372 l = access_size_max;
2373 }
2374 if (l & (l - 1)) {
2375 l = 1 << (qemu_fls(l) - 1);
2376 }
2377
2378 return l;
2379 }
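
/* Worked example for memory_access_size() (illustration only): an 8-byte
 * access at addr 0x1002 against a region whose valid.max_access_size is 4
 * and which does not allow unaligned accesses.
 *
 *     access_size_max = 4                      (from the region)
 *     addr & -addr    = 0x1002 & -0x1002 = 2   -> access_size_max = 2
 *     l = 8 > 2                                -> l = 2
 *     2 is already a power of two              -> return 2
 *
 * The caller in address_space_rw() therefore issues a 16-bit dispatch and
 * loops for the remaining bytes.
 */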
2380
2381 static bool prepare_mmio_access(MemoryRegion *mr)
2382 {
2383 bool unlocked = !qemu_mutex_iothread_locked();
2384 bool release_lock = false;
2385
2386 if (unlocked && mr->global_locking) {
2387 qemu_mutex_lock_iothread();
2388 unlocked = false;
2389 release_lock = true;
2390 }
2391 if (mr->flush_coalesced_mmio) {
2392 if (unlocked) {
2393 qemu_mutex_lock_iothread();
2394 }
2395 qemu_flush_coalesced_mmio_buffer();
2396 if (unlocked) {
2397 qemu_mutex_unlock_iothread();
2398 }
2399 }
2400
2401 return release_lock;
2402 }
2403
2404 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2405 uint8_t *buf, int len, bool is_write)
2406 {
2407 hwaddr l;
2408 uint8_t *ptr;
2409 uint64_t val;
2410 hwaddr addr1;
2411 MemoryRegion *mr;
2412 MemTxResult result = MEMTX_OK;
2413 bool release_lock = false;
2414
2415 rcu_read_lock();
2416 while (len > 0) {
2417 l = len;
2418 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2419
2420 if (is_write) {
2421 if (!memory_access_is_direct(mr, is_write)) {
2422 release_lock |= prepare_mmio_access(mr);
2423 l = memory_access_size(mr, l, addr1);
2424 /* XXX: could force current_cpu to NULL to avoid
2425 potential bugs */
2426 switch (l) {
2427 case 8:
2428 /* 64 bit write access */
2429 val = ldq_p(buf);
2430 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2431 attrs);
2432 break;
2433 case 4:
2434 /* 32 bit write access */
2435 val = ldl_p(buf);
2436 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2437 attrs);
2438 break;
2439 case 2:
2440 /* 16 bit write access */
2441 val = lduw_p(buf);
2442 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2443 attrs);
2444 break;
2445 case 1:
2446 /* 8 bit write access */
2447 val = ldub_p(buf);
2448 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2449 attrs);
2450 break;
2451 default:
2452 abort();
2453 }
2454 } else {
2455 addr1 += memory_region_get_ram_addr(mr);
2456 /* RAM case */
2457 ptr = qemu_get_ram_ptr(addr1);
2458 memcpy(ptr, buf, l);
2459 invalidate_and_set_dirty(mr, addr1, l);
2460 }
2461 } else {
2462 if (!memory_access_is_direct(mr, is_write)) {
2463 /* I/O case */
2464 release_lock |= prepare_mmio_access(mr);
2465 l = memory_access_size(mr, l, addr1);
2466 switch (l) {
2467 case 8:
2468 /* 64 bit read access */
2469 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2470 attrs);
2471 stq_p(buf, val);
2472 break;
2473 case 4:
2474 /* 32 bit read access */
2475 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2476 attrs);
2477 stl_p(buf, val);
2478 break;
2479 case 2:
2480 /* 16 bit read access */
2481 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2482 attrs);
2483 stw_p(buf, val);
2484 break;
2485 case 1:
2486 /* 8 bit read access */
2487 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2488 attrs);
2489 stb_p(buf, val);
2490 break;
2491 default:
2492 abort();
2493 }
2494 } else {
2495 /* RAM case */
2496                 ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + addr1);
2497 memcpy(buf, ptr, l);
2498 }
2499 }
2500
2501 if (release_lock) {
2502 qemu_mutex_unlock_iothread();
2503 release_lock = false;
2504 }
2505
2506 len -= l;
2507 buf += l;
2508 addr += l;
2509 }
2510 rcu_read_unlock();
2511
2512 return result;
2513 }
2514
2515 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2516 const uint8_t *buf, int len)
2517 {
2518 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
2519 }
2520
2521 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2522 uint8_t *buf, int len)
2523 {
2524 return address_space_rw(as, addr, attrs, buf, len, false);
2525 }
2526
2527
2528 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2529 int len, int is_write)
2530 {
2531 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2532 buf, len, is_write);
2533 }
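
/* Usage sketch (assumption, not from the original file): reading a device
 * descriptor out of guest RAM with the helpers above.  The guest address
 * "desc_gpa" is hypothetical.
 *
 *     uint8_t desc[16];
 *     MemTxResult r;
 *
 *     r = address_space_read(&address_space_memory, desc_gpa,
 *                            MEMTXATTRS_UNSPECIFIED, desc, sizeof(desc));
 *     if (r != MEMTX_OK) {
 *         // the access hit unassigned memory or a device that failed it
 *     }
 *
 * cpu_physical_memory_rw() is the legacy wrapper that does the same thing on
 * address_space_memory but discards the MemTxResult.
 */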
2534
2535 enum write_rom_type {
2536 WRITE_DATA,
2537 FLUSH_CACHE,
2538 };
2539
2540 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2541 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2542 {
2543 hwaddr l;
2544 uint8_t *ptr;
2545 hwaddr addr1;
2546 MemoryRegion *mr;
2547
2548 rcu_read_lock();
2549 while (len > 0) {
2550 l = len;
2551 mr = address_space_translate(as, addr, &addr1, &l, true);
2552
2553 if (!(memory_region_is_ram(mr) ||
2554 memory_region_is_romd(mr))) {
2555 l = memory_access_size(mr, l, addr1);
2556 } else {
2557 addr1 += memory_region_get_ram_addr(mr);
2558 /* ROM/RAM case */
2559 ptr = qemu_get_ram_ptr(addr1);
2560 switch (type) {
2561 case WRITE_DATA:
2562 memcpy(ptr, buf, l);
2563 invalidate_and_set_dirty(mr, addr1, l);
2564 break;
2565 case FLUSH_CACHE:
2566 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2567 break;
2568 }
2569 }
2570 len -= l;
2571 buf += l;
2572 addr += l;
2573 }
2574 rcu_read_unlock();
2575 }
2576
2577 /* used for ROM loading: can write to both RAM and ROM */
2578 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2579 const uint8_t *buf, int len)
2580 {
2581 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2582 }
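
/* Usage sketch (illustration only): this is the call a ROM loader would make
 * to place a firmware blob, since a plain address_space_write() will not
 * modify a region that is marked read-only.  "blob", "blob_size" and the
 * 0xfffc0000 load address are hypothetical.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_size);
 */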
2583
2584 void cpu_flush_icache_range(hwaddr start, int len)
2585 {
2586 /*
2587 * This function should do the same thing as an icache flush that was
2588 * triggered from within the guest. For TCG we are always cache coherent,
2589 * so there is no need to flush anything. For KVM / Xen we need to flush
2590 * the host's instruction cache at least.
2591 */
2592 if (tcg_enabled()) {
2593 return;
2594 }
2595
2596 cpu_physical_memory_write_rom_internal(&address_space_memory,
2597 start, NULL, len, FLUSH_CACHE);
2598 }
2599
2600 typedef struct {
2601 MemoryRegion *mr;
2602 void *buffer;
2603 hwaddr addr;
2604 hwaddr len;
2605 bool in_use;
2606 } BounceBuffer;
2607
2608 static BounceBuffer bounce;
2609
2610 typedef struct MapClient {
2611 QEMUBH *bh;
2612 QLIST_ENTRY(MapClient) link;
2613 } MapClient;
2614
2615 QemuMutex map_client_list_lock;
2616 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2617 = QLIST_HEAD_INITIALIZER(map_client_list);
2618
2619 static void cpu_unregister_map_client_do(MapClient *client)
2620 {
2621 QLIST_REMOVE(client, link);
2622 g_free(client);
2623 }
2624
2625 static void cpu_notify_map_clients_locked(void)
2626 {
2627 MapClient *client;
2628
2629 while (!QLIST_EMPTY(&map_client_list)) {
2630 client = QLIST_FIRST(&map_client_list);
2631 qemu_bh_schedule(client->bh);
2632 cpu_unregister_map_client_do(client);
2633 }
2634 }
2635
2636 void cpu_register_map_client(QEMUBH *bh)
2637 {
2638 MapClient *client = g_malloc(sizeof(*client));
2639
2640 qemu_mutex_lock(&map_client_list_lock);
2641 client->bh = bh;
2642 QLIST_INSERT_HEAD(&map_client_list, client, link);
2643 if (!atomic_read(&bounce.in_use)) {
2644 cpu_notify_map_clients_locked();
2645 }
2646 qemu_mutex_unlock(&map_client_list_lock);
2647 }
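
/* Sketch of the retry protocol for the single bounce buffer (assumption, not
 * in the original file).  When address_space_map() returns NULL because
 * bounce.in_use was already set, a caller can park a bottom half and try
 * again once the buffer is released.  "MyDMAJob", "my_map_retry_bh" and
 * "continue_dma" are hypothetical.
 *
 *     static void my_map_retry_bh(void *opaque)
 *     {
 *         MyDMAJob *job = opaque;
 *         // address_space_map() is likely to succeed now; re-run the DMA
 *         // state machine from the point that failed.
 *         continue_dma(job);
 *     }
 *
 *     ...
 *     job->bh = qemu_bh_new(my_map_retry_bh, job);
 *     cpu_register_map_client(job->bh);
 *
 * cpu_notify_map_clients() (below) schedules every registered BH as soon as
 * address_space_unmap() clears bounce.in_use.
 */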
2648
2649 void cpu_exec_init_all(void)
2650 {
2651 qemu_mutex_init(&ram_list.mutex);
2652 memory_map_init();
2653 io_mem_init();
2654 qemu_mutex_init(&map_client_list_lock);
2655 }
2656
2657 void cpu_unregister_map_client(QEMUBH *bh)
2658 {
2659 MapClient *client;
2660
2661 qemu_mutex_lock(&map_client_list_lock);
2662 QLIST_FOREACH(client, &map_client_list, link) {
2663 if (client->bh == bh) {
2664 cpu_unregister_map_client_do(client);
2665 break;
2666 }
2667 }
2668 qemu_mutex_unlock(&map_client_list_lock);
2669 }
2670
2671 static void cpu_notify_map_clients(void)
2672 {
2673 qemu_mutex_lock(&map_client_list_lock);
2674 cpu_notify_map_clients_locked();
2675 qemu_mutex_unlock(&map_client_list_lock);
2676 }
2677
2678 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2679 {
2680 MemoryRegion *mr;
2681 hwaddr l, xlat;
2682
2683 rcu_read_lock();
2684 while (len > 0) {
2685 l = len;
2686 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2687 if (!memory_access_is_direct(mr, is_write)) {
2688 l = memory_access_size(mr, l, addr);
2689             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                     rcu_read_unlock();
2690                 return false;
2691             }
2692 }
2693
2694 len -= l;
2695 addr += l;
2696 }
2697 rcu_read_unlock();
2698 return true;
2699 }
2700
2701 /* Map a physical memory region into a host virtual address.
2702 * May map a subset of the requested range, given by and returned in *plen.
2703 * May return NULL if resources needed to perform the mapping are exhausted.
2704 * Use only for reads OR writes - not for read-modify-write operations.
2705 * Use cpu_register_map_client() to know when retrying the map operation is
2706 * likely to succeed.
2707 */
2708 void *address_space_map(AddressSpace *as,
2709 hwaddr addr,
2710 hwaddr *plen,
2711 bool is_write)
2712 {
2713 hwaddr len = *plen;
2714 hwaddr done = 0;
2715 hwaddr l, xlat, base;
2716 MemoryRegion *mr, *this_mr;
2717 ram_addr_t raddr;
2718
2719 if (len == 0) {
2720 return NULL;
2721 }
2722
2723 l = len;
2724 rcu_read_lock();
2725 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2726
2727 if (!memory_access_is_direct(mr, is_write)) {
2728 if (atomic_xchg(&bounce.in_use, true)) {
2729 rcu_read_unlock();
2730 return NULL;
2731 }
2732 /* Avoid unbounded allocations */
2733 l = MIN(l, TARGET_PAGE_SIZE);
2734 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2735 bounce.addr = addr;
2736 bounce.len = l;
2737
2738 memory_region_ref(mr);
2739 bounce.mr = mr;
2740 if (!is_write) {
2741 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2742 bounce.buffer, l);
2743 }
2744
2745 rcu_read_unlock();
2746 *plen = l;
2747 return bounce.buffer;
2748 }
2749
2750 base = xlat;
2751 raddr = memory_region_get_ram_addr(mr);
2752
2753 for (;;) {
2754 len -= l;
2755 addr += l;
2756 done += l;
2757 if (len == 0) {
2758 break;
2759 }
2760
2761 l = len;
2762 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2763 if (this_mr != mr || xlat != base + done) {
2764 break;
2765 }
2766 }
2767
2768 memory_region_ref(mr);
2769 rcu_read_unlock();
2770 *plen = done;
2771 return qemu_ram_ptr_length(raddr + base, plen);
2772 }
2773
2774 /* Unmaps a memory region previously mapped by address_space_map().
2775 * Will also mark the memory as dirty if is_write == 1. access_len gives
2776 * the amount of memory that was actually read or written by the caller.
2777 */
2778 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2779 int is_write, hwaddr access_len)
2780 {
2781 if (buffer != bounce.buffer) {
2782 MemoryRegion *mr;
2783 ram_addr_t addr1;
2784
2785 mr = qemu_ram_addr_from_host(buffer, &addr1);
2786 assert(mr != NULL);
2787 if (is_write) {
2788 invalidate_and_set_dirty(mr, addr1, access_len);
2789 }
2790 if (xen_enabled()) {
2791 xen_invalidate_map_cache_entry(buffer);
2792 }
2793 memory_region_unref(mr);
2794 return;
2795 }
2796 if (is_write) {
2797 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2798 bounce.buffer, access_len);
2799 }
2800 qemu_vfree(bounce.buffer);
2801 bounce.buffer = NULL;
2802 memory_region_unref(bounce.mr);
2803 atomic_mb_set(&bounce.in_use, false);
2804 cpu_notify_map_clients();
2805 }
2806
2807 void *cpu_physical_memory_map(hwaddr addr,
2808 hwaddr *plen,
2809 int is_write)
2810 {
2811 return address_space_map(&address_space_memory, addr, plen, is_write);
2812 }
2813
2814 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2815 int is_write, hwaddr access_len)
2816 {
2817 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2818 }
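
/* Usage sketch (illustration only) of the map/unmap pair above, the pattern
 * DMA helpers follow.  "sg_addr", "sg_len" and "fill_buffer" are hypothetical.
 *
 *     hwaddr mapped_len = sg_len;
 *     void *host = cpu_physical_memory_map(sg_addr, &mapped_len, 1);
 *
 *     if (host) {
 *         // may be shorter than requested; only touch mapped_len bytes
 *         fill_buffer(host, mapped_len);
 *         cpu_physical_memory_unmap(host, mapped_len, 1, mapped_len);
 *     } else {
 *         // resources exhausted: register a map client and retry later
 *     }
 *
 * Passing is_write = 1 on unmap is what marks the touched range dirty (and
 * invalidates any TBs generated from it) via invalidate_and_set_dirty().
 */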
2819
2820 /* warning: addr must be aligned */
2821 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2822 MemTxAttrs attrs,
2823 MemTxResult *result,
2824 enum device_endian endian)
2825 {
2826 uint8_t *ptr;
2827 uint64_t val;
2828 MemoryRegion *mr;
2829 hwaddr l = 4;
2830 hwaddr addr1;
2831 MemTxResult r;
2832 bool release_lock = false;
2833
2834 rcu_read_lock();
2835 mr = address_space_translate(as, addr, &addr1, &l, false);
2836 if (l < 4 || !memory_access_is_direct(mr, false)) {
2837 release_lock |= prepare_mmio_access(mr);
2838
2839 /* I/O case */
2840 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
2841 #if defined(TARGET_WORDS_BIGENDIAN)
2842 if (endian == DEVICE_LITTLE_ENDIAN) {
2843 val = bswap32(val);
2844 }
2845 #else
2846 if (endian == DEVICE_BIG_ENDIAN) {
2847 val = bswap32(val);
2848 }
2849 #endif
2850 } else {
2851 /* RAM case */
2852 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2853 & TARGET_PAGE_MASK)
2854 + addr1);
2855 switch (endian) {
2856 case DEVICE_LITTLE_ENDIAN:
2857 val = ldl_le_p(ptr);
2858 break;
2859 case DEVICE_BIG_ENDIAN:
2860 val = ldl_be_p(ptr);
2861 break;
2862 default:
2863 val = ldl_p(ptr);
2864 break;
2865 }
2866 r = MEMTX_OK;
2867 }
2868 if (result) {
2869 *result = r;
2870 }
2871 if (release_lock) {
2872 qemu_mutex_unlock_iothread();
2873 }
2874 rcu_read_unlock();
2875 return val;
2876 }
2877
2878 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2879 MemTxAttrs attrs, MemTxResult *result)
2880 {
2881 return address_space_ldl_internal(as, addr, attrs, result,
2882 DEVICE_NATIVE_ENDIAN);
2883 }
2884
2885 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2886 MemTxAttrs attrs, MemTxResult *result)
2887 {
2888 return address_space_ldl_internal(as, addr, attrs, result,
2889 DEVICE_LITTLE_ENDIAN);
2890 }
2891
2892 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2893 MemTxAttrs attrs, MemTxResult *result)
2894 {
2895 return address_space_ldl_internal(as, addr, attrs, result,
2896 DEVICE_BIG_ENDIAN);
2897 }
2898
2899 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2900 {
2901 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2902 }
2903
2904 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2905 {
2906 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2907 }
2908
2909 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2910 {
2911 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2912 }
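
/* Usage sketch (illustration only): the *_phys wrappers above are what device
 * and board code use for one-off reads when the caller does not care about
 * the MemTxResult.  "table_gpa" is a hypothetical guest address.
 *
 *     // the guest stores this field little-endian regardless of
 *     // TARGET_WORDS_BIGENDIAN, so use the explicit _le variant
 *     uint32_t entries = ldl_le_phys(&address_space_memory, table_gpa);
 *
 * When the transaction result matters, call address_space_ldl_le() directly
 * and pass a MemTxResult pointer instead of NULL.
 */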
2913
2914 /* warning: addr must be aligned */
2915 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2916 MemTxAttrs attrs,
2917 MemTxResult *result,
2918 enum device_endian endian)
2919 {
2920 uint8_t *ptr;
2921 uint64_t val;
2922 MemoryRegion *mr;
2923 hwaddr l = 8;
2924 hwaddr addr1;
2925 MemTxResult r;
2926 bool release_lock = false;
2927
2928 rcu_read_lock();
2929 mr = address_space_translate(as, addr, &addr1, &l,
2930 false);
2931 if (l < 8 || !memory_access_is_direct(mr, false)) {
2932 release_lock |= prepare_mmio_access(mr);
2933
2934 /* I/O case */
2935 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
2936 #if defined(TARGET_WORDS_BIGENDIAN)
2937 if (endian == DEVICE_LITTLE_ENDIAN) {
2938 val = bswap64(val);
2939 }
2940 #else
2941 if (endian == DEVICE_BIG_ENDIAN) {
2942 val = bswap64(val);
2943 }
2944 #endif
2945 } else {
2946 /* RAM case */
2947 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2948 & TARGET_PAGE_MASK)
2949 + addr1);
2950 switch (endian) {
2951 case DEVICE_LITTLE_ENDIAN:
2952 val = ldq_le_p(ptr);
2953 break;
2954 case DEVICE_BIG_ENDIAN:
2955 val = ldq_be_p(ptr);
2956 break;
2957 default:
2958 val = ldq_p(ptr);
2959 break;
2960 }
2961 r = MEMTX_OK;
2962 }
2963 if (result) {
2964 *result = r;
2965 }
2966 if (release_lock) {
2967 qemu_mutex_unlock_iothread();
2968 }
2969 rcu_read_unlock();
2970 return val;
2971 }
2972
2973 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2974 MemTxAttrs attrs, MemTxResult *result)
2975 {
2976 return address_space_ldq_internal(as, addr, attrs, result,
2977 DEVICE_NATIVE_ENDIAN);
2978 }
2979
2980 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2981 MemTxAttrs attrs, MemTxResult *result)
2982 {
2983 return address_space_ldq_internal(as, addr, attrs, result,
2984 DEVICE_LITTLE_ENDIAN);
2985 }
2986
2987 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2988 MemTxAttrs attrs, MemTxResult *result)
2989 {
2990 return address_space_ldq_internal(as, addr, attrs, result,
2991 DEVICE_BIG_ENDIAN);
2992 }
2993
2994 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2995 {
2996 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2997 }
2998
2999 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3000 {
3001 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3002 }
3003
3004 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3005 {
3006 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3007 }
3008
3009 /* XXX: optimize */
3010 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs, MemTxResult *result)
3012 {
3013 uint8_t val;
3014 MemTxResult r;
3015
3016 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3017 if (result) {
3018 *result = r;
3019 }
3020 return val;
3021 }
3022
3023 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3024 {
3025 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3026 }
3027
3028 /* warning: addr must be aligned */
3029 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3030 hwaddr addr,
3031 MemTxAttrs attrs,
3032 MemTxResult *result,
3033 enum device_endian endian)
3034 {
3035 uint8_t *ptr;
3036 uint64_t val;
3037 MemoryRegion *mr;
3038 hwaddr l = 2;
3039 hwaddr addr1;
3040 MemTxResult r;
3041 bool release_lock = false;
3042
3043 rcu_read_lock();
3044 mr = address_space_translate(as, addr, &addr1, &l,
3045 false);
3046 if (l < 2 || !memory_access_is_direct(mr, false)) {
3047 release_lock |= prepare_mmio_access(mr);
3048
3049 /* I/O case */
3050 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3051 #if defined(TARGET_WORDS_BIGENDIAN)
3052 if (endian == DEVICE_LITTLE_ENDIAN) {
3053 val = bswap16(val);
3054 }
3055 #else
3056 if (endian == DEVICE_BIG_ENDIAN) {
3057 val = bswap16(val);
3058 }
3059 #endif
3060 } else {
3061 /* RAM case */
3062 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
3063 & TARGET_PAGE_MASK)
3064 + addr1);
3065 switch (endian) {
3066 case DEVICE_LITTLE_ENDIAN:
3067 val = lduw_le_p(ptr);
3068 break;
3069 case DEVICE_BIG_ENDIAN:
3070 val = lduw_be_p(ptr);
3071 break;
3072 default:
3073 val = lduw_p(ptr);
3074 break;
3075 }
3076 r = MEMTX_OK;
3077 }
3078 if (result) {
3079 *result = r;
3080 }
3081 if (release_lock) {
3082 qemu_mutex_unlock_iothread();
3083 }
3084 rcu_read_unlock();
3085 return val;
3086 }
3087
3088 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3089 MemTxAttrs attrs, MemTxResult *result)
3090 {
3091 return address_space_lduw_internal(as, addr, attrs, result,
3092 DEVICE_NATIVE_ENDIAN);
3093 }
3094
3095 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3096 MemTxAttrs attrs, MemTxResult *result)
3097 {
3098 return address_space_lduw_internal(as, addr, attrs, result,
3099 DEVICE_LITTLE_ENDIAN);
3100 }
3101
3102 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3103 MemTxAttrs attrs, MemTxResult *result)
3104 {
3105 return address_space_lduw_internal(as, addr, attrs, result,
3106 DEVICE_BIG_ENDIAN);
3107 }
3108
3109 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3110 {
3111 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3112 }
3113
3114 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3115 {
3116 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3117 }
3118
3119 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3120 {
3121 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3122 }
3123
3124 /* warning: addr must be aligned. The ram page is not marked as dirty
3125 and the code inside is not invalidated. It is useful if the dirty
3126 bits are used to track modified PTEs */
3127 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3128 MemTxAttrs attrs, MemTxResult *result)
3129 {
3130 uint8_t *ptr;
3131 MemoryRegion *mr;
3132 hwaddr l = 4;
3133 hwaddr addr1;
3134 MemTxResult r;
3135 uint8_t dirty_log_mask;
3136 bool release_lock = false;
3137
3138 rcu_read_lock();
3139 mr = address_space_translate(as, addr, &addr1, &l,
3140 true);
3141 if (l < 4 || !memory_access_is_direct(mr, true)) {
3142 release_lock |= prepare_mmio_access(mr);
3143
3144 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3145 } else {
3146 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3147 ptr = qemu_get_ram_ptr(addr1);
3148 stl_p(ptr, val);
3149
3150 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3151 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3152 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
3153 r = MEMTX_OK;
3154 }
3155 if (result) {
3156 *result = r;
3157 }
3158 if (release_lock) {
3159 qemu_mutex_unlock_iothread();
3160 }
3161 rcu_read_unlock();
3162 }
3163
3164 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3165 {
3166 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3167 }
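
/* Sketch of the intended use described in the comment above (assumption, not
 * taken from any particular target): a softmmu page-table walker setting the
 * accessed bit in a guest PTE.  The _notdirty variant skips the
 * DIRTY_MEMORY_CODE handling, so touching PTEs that happen to share a page
 * with translated code does not force TB invalidation; the migration/VGA
 * dirty bits are still updated as usual.  "pte_gpa" and "PTE_ACCESSED_BIT"
 * are hypothetical, and cs is the walking CPU's CPUState.
 *
 *     uint32_t pte = ldl_phys(cs->as, pte_gpa);
 *     if (!(pte & PTE_ACCESSED_BIT)) {
 *         stl_phys_notdirty(cs->as, pte_gpa, pte | PTE_ACCESSED_BIT);
 *     }
 */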
3168
3169 /* warning: addr must be aligned */
3170 static inline void address_space_stl_internal(AddressSpace *as,
3171 hwaddr addr, uint32_t val,
3172 MemTxAttrs attrs,
3173 MemTxResult *result,
3174 enum device_endian endian)
3175 {
3176 uint8_t *ptr;
3177 MemoryRegion *mr;
3178 hwaddr l = 4;
3179 hwaddr addr1;
3180 MemTxResult r;
3181 bool release_lock = false;
3182
3183 rcu_read_lock();
3184 mr = address_space_translate(as, addr, &addr1, &l,
3185 true);
3186 if (l < 4 || !memory_access_is_direct(mr, true)) {
3187 release_lock |= prepare_mmio_access(mr);
3188
3189 #if defined(TARGET_WORDS_BIGENDIAN)
3190 if (endian == DEVICE_LITTLE_ENDIAN) {
3191 val = bswap32(val);
3192 }
3193 #else
3194 if (endian == DEVICE_BIG_ENDIAN) {
3195 val = bswap32(val);
3196 }
3197 #endif
3198 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3199 } else {
3200 /* RAM case */
3201 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3202 ptr = qemu_get_ram_ptr(addr1);
3203 switch (endian) {
3204 case DEVICE_LITTLE_ENDIAN:
3205 stl_le_p(ptr, val);
3206 break;
3207 case DEVICE_BIG_ENDIAN:
3208 stl_be_p(ptr, val);
3209 break;
3210 default:
3211 stl_p(ptr, val);
3212 break;
3213 }
3214 invalidate_and_set_dirty(mr, addr1, 4);
3215 r = MEMTX_OK;
3216 }
3217 if (result) {
3218 *result = r;
3219 }
3220 if (release_lock) {
3221 qemu_mutex_unlock_iothread();
3222 }
3223 rcu_read_unlock();
3224 }
3225
3226 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3227 MemTxAttrs attrs, MemTxResult *result)
3228 {
3229 address_space_stl_internal(as, addr, val, attrs, result,
3230 DEVICE_NATIVE_ENDIAN);
3231 }
3232
3233 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3234 MemTxAttrs attrs, MemTxResult *result)
3235 {
3236 address_space_stl_internal(as, addr, val, attrs, result,
3237 DEVICE_LITTLE_ENDIAN);
3238 }
3239
3240 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3241 MemTxAttrs attrs, MemTxResult *result)
3242 {
3243 address_space_stl_internal(as, addr, val, attrs, result,
3244 DEVICE_BIG_ENDIAN);
3245 }
3246
3247 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3248 {
3249 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3250 }
3251
3252 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3253 {
3254 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3255 }
3256
3257 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3258 {
3259 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3260 }
3261
3262 /* XXX: optimize */
3263 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
3265 {
3266 uint8_t v = val;
3267 MemTxResult r;
3268
3269 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3270 if (result) {
3271 *result = r;
3272 }
3273 }
3274
3275 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3276 {
3277 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3278 }
3279
3280 /* warning: addr must be aligned */
3281 static inline void address_space_stw_internal(AddressSpace *as,
3282 hwaddr addr, uint32_t val,
3283 MemTxAttrs attrs,
3284 MemTxResult *result,
3285 enum device_endian endian)
3286 {
3287 uint8_t *ptr;
3288 MemoryRegion *mr;
3289 hwaddr l = 2;
3290 hwaddr addr1;
3291 MemTxResult r;
3292 bool release_lock = false;
3293
3294 rcu_read_lock();
3295 mr = address_space_translate(as, addr, &addr1, &l, true);
3296 if (l < 2 || !memory_access_is_direct(mr, true)) {
3297 release_lock |= prepare_mmio_access(mr);
3298
3299 #if defined(TARGET_WORDS_BIGENDIAN)
3300 if (endian == DEVICE_LITTLE_ENDIAN) {
3301 val = bswap16(val);
3302 }
3303 #else
3304 if (endian == DEVICE_BIG_ENDIAN) {
3305 val = bswap16(val);
3306 }
3307 #endif
3308 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3309 } else {
3310 /* RAM case */
3311 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3312 ptr = qemu_get_ram_ptr(addr1);
3313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 stw_le_p(ptr, val);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 stw_be_p(ptr, val);
3319 break;
3320 default:
3321 stw_p(ptr, val);
3322 break;
3323 }
3324 invalidate_and_set_dirty(mr, addr1, 2);
3325 r = MEMTX_OK;
3326 }
3327 if (result) {
3328 *result = r;
3329 }
3330 if (release_lock) {
3331 qemu_mutex_unlock_iothread();
3332 }
3333 rcu_read_unlock();
3334 }
3335
3336 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3337 MemTxAttrs attrs, MemTxResult *result)
3338 {
3339 address_space_stw_internal(as, addr, val, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3341 }
3342
3343 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3344 MemTxAttrs attrs, MemTxResult *result)
3345 {
3346 address_space_stw_internal(as, addr, val, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3348 }
3349
3350 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3351 MemTxAttrs attrs, MemTxResult *result)
3352 {
3353 address_space_stw_internal(as, addr, val, attrs, result,
3354 DEVICE_BIG_ENDIAN);
3355 }
3356
3357 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3358 {
3359 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3360 }
3361
3362 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3363 {
3364 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3365 }
3366
3367 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3368 {
3369 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3370 }
3371
3372 /* XXX: optimize */
3373 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3374 MemTxAttrs attrs, MemTxResult *result)
3375 {
3376 MemTxResult r;
3377 val = tswap64(val);
3378 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3379 if (result) {
3380 *result = r;
3381 }
3382 }
3383
3384 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3385 MemTxAttrs attrs, MemTxResult *result)
3386 {
3387 MemTxResult r;
3388 val = cpu_to_le64(val);
3389 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3390 if (result) {
3391 *result = r;
3392 }
3393 }
3394 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3395 MemTxAttrs attrs, MemTxResult *result)
3396 {
3397 MemTxResult r;
3398 val = cpu_to_be64(val);
3399 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3400 if (result) {
3401 *result = r;
3402 }
3403 }
3404
3405 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3406 {
3407 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3408 }
3409
3410 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3411 {
3412 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3413 }
3414
3415 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3416 {
3417 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3418 }
3419
3420 /* virtual memory access for debug (includes writing to ROM) */
3421 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3422 uint8_t *buf, int len, int is_write)
3423 {
3424 int l;
3425 hwaddr phys_addr;
3426 target_ulong page;
3427
3428 while (len > 0) {
3429 page = addr & TARGET_PAGE_MASK;
3430 phys_addr = cpu_get_phys_page_debug(cpu, page);
3431 /* if no physical page mapped, return an error */
3432 if (phys_addr == -1)
3433 return -1;
3434 l = (page + TARGET_PAGE_SIZE) - addr;
3435 if (l > len)
3436 l = len;
3437 phys_addr += (addr & ~TARGET_PAGE_MASK);
3438 if (is_write) {
3439 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3440 } else {
3441 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3442 buf, l, 0);
3443 }
3444 len -= l;
3445 buf += l;
3446 addr += l;
3447 }
3448 return 0;
3449 }
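
/* Usage sketch (illustration only): this is the entry point the gdbstub uses
 * to peek and poke guest *virtual* memory, e.g. for a memory-read packet.
 * "vaddr" here is a hypothetical guest virtual address.
 *
 *     uint8_t buf[64];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // no mapping for that virtual address in the current context
 *     }
 *
 * Writes (is_write != 0) go through cpu_physical_memory_write_rom() so a
 * debugger can plant breakpoints even in ROM-backed pages.
 */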
3450 #endif
3451
3452 /*
3453 * A helper function for the _utterly broken_ virtio device model to find out if
3454 * it's running on a big endian machine. Don't do this at home kids!
3455 */
3456 bool target_words_bigendian(void);
3457 bool target_words_bigendian(void)
3458 {
3459 #if defined(TARGET_WORDS_BIGENDIAN)
3460 return true;
3461 #else
3462 return false;
3463 #endif
3464 }
3465
3466 #ifndef CONFIG_USER_ONLY
3467 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3468 {
3469     MemoryRegion *mr;
3470 hwaddr l = 1;
3471 bool res;
3472
3473 rcu_read_lock();
3474 mr = address_space_translate(&address_space_memory,
3475 phys_addr, &phys_addr, &l, false);
3476
3477 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3478 rcu_read_unlock();
3479 return res;
3480 }
3481
3482 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3483 {
3484 RAMBlock *block;
3485 int ret = 0;
3486
3487 rcu_read_lock();
3488 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3489 ret = func(block->idstr, block->host, block->offset,
3490 block->used_length, opaque);
3491 if (ret) {
3492 break;
3493 }
3494 }
3495 rcu_read_unlock();
3496 return ret;
3497 }
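
/* Usage sketch (assumption, written to match the callback invocation above):
 * an iterator that sums up the used length of every RAM block, e.g. for a
 * hypothetical statistics dump.
 *
 *     static int add_block_size(const char *idstr, void *host_addr,
 *                               ram_addr_t offset, ram_addr_t used_length,
 *                               void *opaque)
 *     {
 *         uint64_t *total = opaque;
 *
 *         *total += used_length;
 *         return 0;               // non-zero would stop the iteration
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_size, &total);
 */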
3498 #endif