]> git.proxmox.com Git - mirror_qemu.git/blob - exec.c
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
[mirror_qemu.git] / exec.c
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #ifndef _WIN32
21 #include <sys/mman.h>
22 #endif
23
24 #include "qemu-common.h"
25 #include "cpu.h"
26 #include "tcg.h"
27 #include "hw/hw.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/boards.h"
30 #endif
31 #include "hw/qdev.h"
32 #include "sysemu/kvm.h"
33 #include "sysemu/sysemu.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
45 #include "trace.h"
46 #endif
47 #include "exec/cpu-all.h"
48 #include "qemu/rcu_queue.h"
49 #include "qemu/main-loop.h"
50 #include "translate-all.h"
51 #include "sysemu/replay.h"
52
53 #include "exec/memory-internal.h"
54 #include "exec/ram_addr.h"
55 #include "exec/log.h"
56
57 #include "qemu/range.h"
58 #ifndef _WIN32
59 #include "qemu/mmap-alloc.h"
60 #endif
61
62 //#define DEBUG_SUBPAGE
63
64 #if !defined(CONFIG_USER_ONLY)
65 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
66 * are protected by the ramlist lock.
67 */
68 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
69
70 static MemoryRegion *system_memory;
71 static MemoryRegion *system_io;
72
73 AddressSpace address_space_io;
74 AddressSpace address_space_memory;
75
76 MemoryRegion io_mem_rom, io_mem_notdirty;
77 static MemoryRegion io_mem_unassigned;
78
79 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
80 #define RAM_PREALLOC (1 << 0)
81
82 /* RAM is mmap-ed with MAP_SHARED */
83 #define RAM_SHARED (1 << 1)
84
85 /* Only a portion of RAM (used_length) is actually used, and migrated.
86 * This used_length size can change across reboots.
87 */
88 #define RAM_RESIZEABLE (1 << 2)
89
90 #endif
91
92 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
93 /* current CPU in the current thread. It is only valid inside
94 cpu_exec() */
95 __thread CPUState *current_cpu;
96 /* 0 = Do not count executed instructions.
97 1 = Precise instruction counting.
98 2 = Adaptive rate instruction counting. */
99 int use_icount;
100
101 #if !defined(CONFIG_USER_ONLY)
102
103 typedef struct PhysPageEntry PhysPageEntry;
104
105 struct PhysPageEntry {
106 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
107 uint32_t skip : 6;
108 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
109 uint32_t ptr : 26;
110 };
111
112 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
113
114 /* Size of the L2 (and L3, etc) page tables. */
115 #define ADDR_SPACE_BITS 64
116
117 #define P_L2_BITS 9
118 #define P_L2_SIZE (1 << P_L2_BITS)
119
120 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
121
122 typedef PhysPageEntry Node[P_L2_SIZE];
123
124 typedef struct PhysPageMap {
125 struct rcu_head rcu;
126
127 unsigned sections_nb;
128 unsigned sections_nb_alloc;
129 unsigned nodes_nb;
130 unsigned nodes_nb_alloc;
131 Node *nodes;
132 MemoryRegionSection *sections;
133 } PhysPageMap;
134
135 struct AddressSpaceDispatch {
136 struct rcu_head rcu;
137
138 MemoryRegionSection *mru_section;
139 /* This is a multi-level map on the physical address space.
140 * The bottom level has pointers to MemoryRegionSections.
141 */
142 PhysPageEntry phys_map;
143 PhysPageMap map;
144 AddressSpace *as;
145 };
146
147 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
148 typedef struct subpage_t {
149 MemoryRegion iomem;
150 AddressSpace *as;
151 hwaddr base;
152 uint16_t sub_section[TARGET_PAGE_SIZE];
153 } subpage_t;
154
155 #define PHYS_SECTION_UNASSIGNED 0
156 #define PHYS_SECTION_NOTDIRTY 1
157 #define PHYS_SECTION_ROM 2
158 #define PHYS_SECTION_WATCH 3
159
160 static void io_mem_init(void);
161 static void memory_map_init(void);
162 static void tcg_commit(MemoryListener *listener);
163
164 static MemoryRegion io_mem_watch;
165
166 /**
167 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
168 * @cpu: the CPU whose AddressSpace this is
169 * @as: the AddressSpace itself
170 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
171 * @tcg_as_listener: listener for tracking changes to the AddressSpace
172 */
173 struct CPUAddressSpace {
174 CPUState *cpu;
175 AddressSpace *as;
176 struct AddressSpaceDispatch *memory_dispatch;
177 MemoryListener tcg_as_listener;
178 };
179
180 #endif
181
182 #if !defined(CONFIG_USER_ONLY)
183
184 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
185 {
186 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
187 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
188 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
189 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
190 }
191 }
192
193 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
194 {
195 unsigned i;
196 uint32_t ret;
197 PhysPageEntry e;
198 PhysPageEntry *p;
199
200 ret = map->nodes_nb++;
201 p = map->nodes[ret];
202 assert(ret != PHYS_MAP_NODE_NIL);
203 assert(ret != map->nodes_nb_alloc);
204
205 e.skip = leaf ? 0 : 1;
206 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
207 for (i = 0; i < P_L2_SIZE; ++i) {
208 memcpy(&p[i], &e, sizeof(e));
209 }
210 return ret;
211 }
212
/* Recursively populate the radix tree so that the target-page-index range
 * [*index, *index + *nb) resolves to section number @leaf.
 *
 * @level counts down from P_L2_LEVELS - 1 to 0; @step is the number of
 * target pages covered by a single entry at this level.  On return,
 * *index and *nb have been advanced past the pages mapped at this level.
 */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    /* Allocate the child node lazily; it is a leaf only at level 0. */
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Range fully covers this entry: point it directly at the
             * section, no need to descend further. */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: recurse one level down. */
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
238
239 static void phys_page_set(AddressSpaceDispatch *d,
240 hwaddr index, hwaddr nb,
241 uint16_t leaf)
242 {
243 /* Wildly overreserve - it doesn't matter much. */
244 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
245
246 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
247 }
248
249 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
250 * and update our entry so we can skip it and go directly to the destination.
251 */
/* Compact one interior node: if it has exactly one child, fold the child
 * into @lp by increasing lp->skip so lookups jump straight past it.
 * NOTE(review): the @compacted bitmap is threaded through the recursion
 * but never read or written here — apparently vestigial; confirm before
 * relying on it.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        /* Remember the last populated slot and count them; recurse into
         * interior children (skip != 0) first so they are compacted
         * bottom-up. */
        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have.
     * (skip is a 6-bit field, but 1 << 3 keeps headroom.) */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        /* Interior child: absorb its skip distance into ours. */
        lp->skip += p[valid_ptr].skip;
    }
}
301
/* Compact the whole dispatch radix tree rooted at d->phys_map.
 * The 'compacted' bitmap (a VLA sized by @nodes_nb) is passed down but
 * currently unused by phys_page_compact — TODO confirm it can be dropped.
 */
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    /* skip == 0 on the root means an empty/leaf map: nothing to compact. */
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
310
311 static inline bool section_covers_addr(const MemoryRegionSection *section,
312 hwaddr addr)
313 {
314 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
315 * the section must cover the entire address space.
316 */
317 return section->size.hi ||
318 range_covers_byte(section->offset_within_address_space,
319 section->size.lo, addr);
320 }
321
/* Walk the skip-compressed radix tree and return the section covering
 * @addr, or the unassigned section if nothing maps it.
 */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* Each hop consumes lp.skip levels (compacted chains collapse to one
     * hop); lp.skip == 0 marks a leaf and ends the loop. */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* A compacted leaf may cover less than its nominal span; double-check
     * the section really contains @addr. */
    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
343
344 bool memory_region_is_unassigned(MemoryRegion *mr)
345 {
346 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
347 && mr != &io_mem_watch;
348 }
349
350 /* Called from RCU critical section */
/* Look up the MemoryRegionSection for @addr in dispatch @d, consulting a
 * most-recently-used cache first.  If @resolve_subpage, descend through a
 * subpage container to the per-subpage section.  Caller holds the RCU
 * read lock; mru_section is read/written with atomic_read/atomic_set so
 * concurrent lookups see a consistent pointer.
 */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    /* Fast path: the cached section (if valid) already covers @addr. */
    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    /* Only refresh the MRU cache on a slow-path hit. */
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
376
377 /* Called from RCU critical section */
/* Translate @addr within dispatch @d: return its section, store the
 * offset within the target MemoryRegion in *xlat, and (for RAM only)
 * clamp *plen so the access does not run past the section.
 * Called from RCU critical section.
 */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        /* Int128 arithmetic because a section can span the full 2^64. */
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
412
413 /* Called from RCU critical section */
/* Translate @addr in @as to a terminal MemoryRegion, iterating through any
 * chain of IOMMUs.  Stores the region-relative address in *xlat and clamps
 * *plen to the IOMMU page / Xen page as needed.  On a permission fault the
 * unassigned region is returned.  Called from RCU critical section.
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        /* Not an IOMMU region: translation is complete. */
        if (!mr->iommu_ops) {
            break;
        }

        /* Combine the translated page frame with the in-page offset and
         * clamp the length to the end of the IOMMU page. */
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        /* perm bit 0 = read allowed, bit 1 = write allowed. */
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        /* Continue the walk in the IOMMU's target address space. */
        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        /* Xen mapcache works page-at-a-time: clamp to the page boundary. */
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
451
452 /* Called from RCU critical section */
453 MemoryRegionSection *
454 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
455 hwaddr *xlat, hwaddr *plen)
456 {
457 MemoryRegionSection *section;
458 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
459
460 section = address_space_translate_internal(d, addr, xlat, plen, false);
461
462 assert(!section->mr->iommu_ops);
463 return section;
464 }
465 #endif
466
467 #if !defined(CONFIG_USER_ONLY)
468
469 static int cpu_common_post_load(void *opaque, int version_id)
470 {
471 CPUState *cpu = opaque;
472
473 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
474 version_id is increased. */
475 cpu->interrupt_request &= ~0x01;
476 tlb_flush(cpu, 1);
477
478 return 0;
479 }
480
481 static int cpu_common_pre_load(void *opaque)
482 {
483 CPUState *cpu = opaque;
484
485 cpu->exception_index = -1;
486
487 return 0;
488 }
489
490 static bool cpu_common_exception_index_needed(void *opaque)
491 {
492 CPUState *cpu = opaque;
493
494 return tcg_enabled() && cpu->exception_index != -1;
495 }
496
/* Optional migration subsection: present only when a TCG exception is
 * pending (see cpu_common_exception_index_needed). */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
507
508 static bool cpu_common_crash_occurred_needed(void *opaque)
509 {
510 CPUState *cpu = opaque;
511
512 return cpu->crash_occurred;
513 }
514
/* Optional migration subsection carrying the guest-crash flag (see
 * cpu_common_crash_occurred_needed). */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
525
/* Migration description for state shared by all CPU models: halted and
 * interrupt_request, plus the optional subsections above.  Registered by
 * cpu_exec_init() for CPUs whose device class has no vmsd of its own. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
543
544 #endif
545
546 CPUState *qemu_get_cpu(int index)
547 {
548 CPUState *cpu;
549
550 CPU_FOREACH(cpu) {
551 if (cpu->cpu_index == index) {
552 return cpu;
553 }
554 }
555
556 return NULL;
557 }
558
559 #if !defined(CONFIG_USER_ONLY)
/* Attach AddressSpace @as to @cpu at index @asidx, lazily allocating the
 * cpu_ases array on first use.  Index 0 additionally becomes the cpu->as
 * convenience alias.  Under TCG a listener is registered so the cached
 * dispatch pointer tracks topology changes (tcg_commit).
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
587
588 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
589 {
590 /* Return the AddressSpace corresponding to the specified index */
591 return cpu->cpu_ases[asidx].as;
592 }
593 #endif
594
595 #ifndef CONFIG_USER_ONLY
596 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
597
598 static int cpu_get_free_index(Error **errp)
599 {
600 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
601
602 if (cpu >= MAX_CPUMASK_BITS) {
603 error_setg(errp, "Trying to use more CPUs than max of %d",
604 MAX_CPUMASK_BITS);
605 return -1;
606 }
607
608 bitmap_set(cpu_index_map, cpu, 1);
609 return cpu;
610 }
611
612 void cpu_exec_exit(CPUState *cpu)
613 {
614 if (cpu->cpu_index == -1) {
615 /* cpu_index was never allocated by this @cpu or was already freed. */
616 return;
617 }
618
619 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
620 cpu->cpu_index = -1;
621 }
622 #else
623
624 static int cpu_get_free_index(Error **errp)
625 {
626 CPUState *some_cpu;
627 int cpu_index = 0;
628
629 CPU_FOREACH(some_cpu) {
630 cpu_index++;
631 }
632 return cpu_index;
633 }
634
/* User-mode: indices are never recycled, so teardown is a no-op. */
void cpu_exec_exit(CPUState *cpu)
{
}
638 #endif
639
/* Register @cpu with the accel-independent machinery: allocate a
 * cpu_index, insert it into the global CPU list and hook up migration
 * state.  On failure the error is propagated via @errp and the CPU is
 * left unregistered.
 */
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    /* User-mode has no BQL; the list lock serialises index allocation
     * and list insertion against concurrent thread creation. */
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    /* Register generic CPU state for migration unless the device class
     * already covers it with its own vmsd. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
689
690 #if defined(CONFIG_USER_ONLY)
/* User-mode: guest virtual addresses map 1:1, so the TB covering @pc can
 * be invalidated directly. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
695 #else
/* Softmmu: translate @pc via the debug path, then invalidate the TB at
 * the resulting physical address in the right address space.  A failed
 * translation (phys == -1, i.e. page unmapped) is silently ignored. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        /* Re-attach the in-page offset stripped by the page translation. */
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
706 #endif
707
708 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation has no watchpoint support: no-op. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}
713
/* User-mode stub: watchpoints are unsupported, always -ENOSYS. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}
719
/* User-mode stub: nothing to remove. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}
723
/* User-mode stub: watchpoints are unsupported, always -ENOSYS. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
729 #else
730 /* Add a watchpoint. */
731 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
732 int flags, CPUWatchpoint **watchpoint)
733 {
734 CPUWatchpoint *wp;
735
736 /* forbid ranges which are empty or run off the end of the address space */
737 if (len == 0 || (addr + len - 1) < addr) {
738 error_report("tried to set invalid watchpoint at %"
739 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
740 return -EINVAL;
741 }
742 wp = g_malloc(sizeof(*wp));
743
744 wp->vaddr = addr;
745 wp->len = len;
746 wp->flags = flags;
747
748 /* keep all GDB-injected watchpoints in front */
749 if (flags & BP_GDB) {
750 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
751 } else {
752 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
753 }
754
755 tlb_flush_page(cpu, addr);
756
757 if (watchpoint)
758 *watchpoint = wp;
759 return 0;
760 }
761
762 /* Remove a specific watchpoint. */
763 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
764 int flags)
765 {
766 CPUWatchpoint *wp;
767
768 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
769 if (addr == wp->vaddr && len == wp->len
770 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
771 cpu_watchpoint_remove_by_ref(cpu, wp);
772 return 0;
773 }
774 }
775 return -ENOENT;
776 }
777
778 /* Remove a specific watchpoint by reference. */
779 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
780 {
781 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
782
783 tlb_flush_page(cpu, watchpoint->vaddr);
784
785 g_free(watchpoint);
786 }
787
788 /* Remove all matching watchpoints. */
789 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
790 {
791 CPUWatchpoint *wp, *next;
792
793 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
794 if (wp->flags & mask) {
795 cpu_watchpoint_remove_by_ref(cpu, wp);
796 }
797 }
798 }
799
800 /* Return true if this watchpoint address matches the specified
801 * access (ie the address range covered by the watchpoint overlaps
802 * partially or completely with the address range covered by the
803 * access).
804 */
805 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
806 vaddr addr,
807 vaddr len)
808 {
809 /* We know the lengths are non-zero, but a little caution is
810 * required to avoid errors in the case where the range ends
811 * exactly at the top of the address space and so addr + len
812 * wraps round to zero.
813 */
814 vaddr wpend = wp->vaddr + wp->len - 1;
815 vaddr addrend = addr + len - 1;
816
817 return !(addr > wpend || wp->vaddr > addrend);
818 }
819
820 #endif
821
822 /* Add a breakpoint. */
823 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
824 CPUBreakpoint **breakpoint)
825 {
826 CPUBreakpoint *bp;
827
828 bp = g_malloc(sizeof(*bp));
829
830 bp->pc = pc;
831 bp->flags = flags;
832
833 /* keep all GDB-injected breakpoints in front */
834 if (flags & BP_GDB) {
835 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
836 } else {
837 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
838 }
839
840 breakpoint_invalidate(cpu, pc);
841
842 if (breakpoint) {
843 *breakpoint = bp;
844 }
845 return 0;
846 }
847
848 /* Remove a specific breakpoint. */
849 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
850 {
851 CPUBreakpoint *bp;
852
853 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
854 if (bp->pc == pc && bp->flags == flags) {
855 cpu_breakpoint_remove_by_ref(cpu, bp);
856 return 0;
857 }
858 }
859 return -ENOENT;
860 }
861
862 /* Remove a specific breakpoint by reference. */
863 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
864 {
865 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
866
867 breakpoint_invalidate(cpu, breakpoint->pc);
868
869 g_free(breakpoint);
870 }
871
872 /* Remove all matching breakpoints. */
873 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
874 {
875 CPUBreakpoint *bp, *next;
876
877 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
878 if (bp->flags & mask) {
879 cpu_breakpoint_remove_by_ref(cpu, bp);
880 }
881 }
882 }
883
884 /* enable or disable single step mode. EXCP_DEBUG is returned by the
885 CPU loop after each instruction */
886 void cpu_single_step(CPUState *cpu, int enabled)
887 {
888 if (cpu->singlestep_enabled != enabled) {
889 cpu->singlestep_enabled = enabled;
890 if (kvm_enabled()) {
891 kvm_update_guest_debug(cpu, 0);
892 } else {
893 /* must flush all the translated code to avoid inconsistencies */
894 /* XXX: only flush what is necessary */
895 tb_flush(cpu);
896 }
897 }
898 }
899
/* Print a fatal-error message plus full CPU state to stderr (and the QEMU
 * log, if separate), then abort().  Never returns.
 *
 * The va_list is copied up front because it is consumed twice: once for
 * stderr and once for the log.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so the guest cannot have
         * hijacked it and swallowed our abort(). */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
932
933 #if !defined(CONFIG_USER_ONLY)
934 /* Called from RCU critical section */
/* Return the RAMBlock containing ram_addr @addr, checking a
 * most-recently-used cache before scanning the list.  Aborts if @addr is
 * not inside any block.  Called from RCU critical section.
 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* Fast path: unsigned subtraction makes this a single range check. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     * mru_block = xxx
     * rcu_read_unlock()
     * xxx removed from list
     * rcu_read_lock()
     * read mru_block
     * mru_block = NULL;
     * call_rcu(reclaim_ramblock, xxx);
     * rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
972
/* Re-arm dirty tracking for [start, start + length) by resetting the TLB
 * entries of every CPU covering that range, so the next write takes the
 * slow path and marks the pages dirty again.  The range must lie within
 * a single RAMBlock (asserted below).
 */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    /* Round the range out to whole target pages. */
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    /* TLBs are keyed by host virtual address of the RAM. */
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
992
993 /* Note: start and end must be within the same ram block. */
/* Note: start and end must be within the same ram block. */
/* Atomically test-and-clear the dirty bits of @client for
 * [start, start + length).  Returns true if any page was dirty, in which
 * case TCG TLBs are re-armed so subsequent writes re-dirty the pages.
 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    /* The bitmap is split into fixed-size blocks; clear each intersecting
     * chunk atomically. */
    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
1031
1032 /* Called from RCU critical section */
/* Called from RCU critical section */
/* Compute the iotlb value for a TLB entry mapping @vaddr -> @section.
 * For RAM the value is a ram_addr with the NOTDIRTY/ROM section number
 * ORed into the low (sub-page) bits; for MMIO it is the section index
 * plus the in-section offset.  If a watchpoint overlaps the page, the
 * entry is redirected to the watchpoint section and TLB_MMIO is set in
 * *address so every access traps.
 */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
1075 #endif /* defined(CONFIG_USER_ONLY) */
1076
1077 #if !defined(CONFIG_USER_ONLY)
1078
1079 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1080 uint16_t section);
1081 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1082
1083 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1084 qemu_anon_ram_alloc;
1085
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1095
1096 static uint16_t phys_section_add(PhysPageMap *map,
1097 MemoryRegionSection *section)
1098 {
1099 /* The physical section number is ORed with a page-aligned
1100 * pointer to produce the iotlb entries. Thus it should
1101 * never overflow into the page-aligned value.
1102 */
1103 assert(map->sections_nb < TARGET_PAGE_SIZE);
1104
1105 if (map->sections_nb == map->sections_nb_alloc) {
1106 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1107 map->sections = g_renew(MemoryRegionSection, map->sections,
1108 map->sections_nb_alloc);
1109 }
1110 map->sections[map->sections_nb] = *section;
1111 memory_region_ref(section->mr);
1112 return map->sections_nb++;
1113 }
1114
1115 static void phys_section_destroy(MemoryRegion *mr)
1116 {
1117 bool have_sub_page = mr->subpage;
1118
1119 memory_region_unref(mr);
1120
1121 if (have_sub_page) {
1122 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1123 object_unref(OBJECT(&subpage->iomem));
1124 g_free(subpage);
1125 }
1126 }
1127
1128 static void phys_sections_free(PhysPageMap *map)
1129 {
1130 while (map->sections_nb > 0) {
1131 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1132 phys_section_destroy(section->mr);
1133 }
1134 g_free(map->sections);
1135 g_free(map->nodes);
1136 }
1137
/* Register @section, which covers less than a full target page, by
 * installing it inside a subpage container.  The container for the page
 * is created on first use and mapped into the radix tree; subsequent
 * sub-sections of the same page reuse it.
 */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must be either still unassigned or already a subpage. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First sub-section on this page: create the container and map
         * the whole page to it. */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register the in-page byte range [start, end] for this section. */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
1167
1168
1169 static void register_multipage(AddressSpaceDispatch *d,
1170 MemoryRegionSection *section)
1171 {
1172 hwaddr start_addr = section->offset_within_address_space;
1173 uint16_t section_index = phys_section_add(&d->map, section);
1174 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1175 TARGET_PAGE_BITS));
1176
1177 assert(num_pages);
1178 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1179 }
1180
/* MemoryListener region_add hook: split @section into an unaligned head
 * and tail handled as subpages, plus a page-aligned middle registered as
 * whole pages. */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    /* now = piece registered this iteration; remain = what is left. */
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned head: register up to the next page boundary (or the
         * whole section if it is smaller) as a subpage. */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Loop until the just-registered piece consumed everything left. */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Sub-page-sized tail. */
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            /* Still unaligned: peel off exactly one page as a subpage. */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Page-aligned middle: round the size down to whole pages. */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1213
/* Flush any MMIO writes buffered in KVM's coalesced-MMIO ring so they
 * take effect before further emulation.  No-op when KVM is not in use.
 *
 * Fix: braces added around the single-statement if body for consistency
 * with the rest of the file (QEMU coding style requires them). */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1219
/* Acquire the global RAM-block list mutex. */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
1224
/* Release the global RAM-block list mutex. */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1229
1230 #ifdef __linux__
1231 static void *file_ram_alloc(RAMBlock *block,
1232 ram_addr_t memory,
1233 const char *path,
1234 Error **errp)
1235 {
1236 bool unlink_on_error = false;
1237 char *filename;
1238 char *sanitized_name;
1239 char *c;
1240 void *area;
1241 int fd;
1242 int64_t page_size;
1243
1244 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1245 error_setg(errp,
1246 "host lacks kvm mmu notifiers, -mem-path unsupported");
1247 return NULL;
1248 }
1249
1250 for (;;) {
1251 fd = open(path, O_RDWR);
1252 if (fd >= 0) {
1253 /* @path names an existing file, use it */
1254 break;
1255 }
1256 if (errno == ENOENT) {
1257 /* @path names a file that doesn't exist, create it */
1258 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1259 if (fd >= 0) {
1260 unlink_on_error = true;
1261 break;
1262 }
1263 } else if (errno == EISDIR) {
1264 /* @path names a directory, create a file there */
1265 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1266 sanitized_name = g_strdup(memory_region_name(block->mr));
1267 for (c = sanitized_name; *c != '\0'; c++) {
1268 if (*c == '/') {
1269 *c = '_';
1270 }
1271 }
1272
1273 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1274 sanitized_name);
1275 g_free(sanitized_name);
1276
1277 fd = mkstemp(filename);
1278 if (fd >= 0) {
1279 unlink(filename);
1280 g_free(filename);
1281 break;
1282 }
1283 g_free(filename);
1284 }
1285 if (errno != EEXIST && errno != EINTR) {
1286 error_setg_errno(errp, errno,
1287 "can't open backing store %s for guest RAM",
1288 path);
1289 goto error;
1290 }
1291 /*
1292 * Try again on EINTR and EEXIST. The latter happens when
1293 * something else creates the file between our two open().
1294 */
1295 }
1296
1297 page_size = qemu_fd_getpagesize(fd);
1298 block->mr->align = page_size;
1299
1300 if (memory < page_size) {
1301 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1302 "or larger than page size 0x%" PRIx64,
1303 memory, page_size);
1304 goto error;
1305 }
1306
1307 memory = ROUND_UP(memory, page_size);
1308
1309 /*
1310 * ftruncate is not supported by hugetlbfs in older
1311 * hosts, so don't bother bailing out on errors.
1312 * If anything goes wrong with it under other filesystems,
1313 * mmap will fail.
1314 */
1315 if (ftruncate(fd, memory)) {
1316 perror("ftruncate");
1317 }
1318
1319 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
1320 if (area == MAP_FAILED) {
1321 error_setg_errno(errp, errno,
1322 "unable to map backing store for guest RAM");
1323 close(fd);
1324 goto error;
1325 }
1326
1327 if (mem_prealloc) {
1328 os_mem_prealloc(fd, area, memory);
1329 }
1330
1331 block->fd = fd;
1332 return area;
1333
1334 error:
1335 if (unlink_on_error) {
1336 unlink(path);
1337 }
1338 close(fd);
1339 return NULL;
1340 }
1341 #endif
1342
/* Called with the ramlist lock held.
 *
 * Pick an offset in the ram_addr_t space for a new block of @size bytes
 * using a best-fit search over the gaps between existing blocks: the
 * smallest gap that still fits wins.  Aborts if nothing fits. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        /* Gap candidate starts where this block ends... */
        end = block->offset + block->max_length;

        /* ...and extends to the nearest block starting at or after it. */
        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        /* Keep the smallest gap that can hold the new block. */
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
1379
1380 ram_addr_t last_ram_offset(void)
1381 {
1382 RAMBlock *block;
1383 ram_addr_t last = 0;
1384
1385 rcu_read_lock();
1386 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1387 last = MAX(last, block->offset + block->max_length);
1388 }
1389 rcu_read_unlock();
1390 return last;
1391 }
1392
1393 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1394 {
1395 int ret;
1396
1397 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1398 if (!machine_dump_guest_core(current_machine)) {
1399 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1400 if (ret) {
1401 perror("qemu_madvise");
1402 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1403 "but dump_guest_core=off specified\n");
1404 }
1405 }
1406 }
1407
1408 /* Called within an RCU critical section, or while the ramlist lock
1409 * is held.
1410 */
1411 static RAMBlock *find_ram_block(ram_addr_t addr)
1412 {
1413 RAMBlock *block;
1414
1415 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1416 if (block->offset == addr) {
1417 return block;
1418 }
1419 }
1420
1421 return NULL;
1422 }
1423
/* Return the block's identifier string (see qemu_ram_set_idstr). */
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}
1428
/* Called with iothread lock held. */
/* Assign the identifier string for the block at @addr: an optional qdev
 * device path prefix ("<path>/") followed by @name.  Aborts if another
 * block already carries the same idstr. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    /* A block may only be named once. */
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* idstr must be unique across all registered blocks. */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
1457
1458 /* Called with iothread lock held. */
1459 void qemu_ram_unset_idstr(ram_addr_t addr)
1460 {
1461 RAMBlock *block;
1462
1463 /* FIXME: arch_init.c assumes that this is not called throughout
1464 * migration. Ignore the problem since hot-unplug during migration
1465 * does not work anyway.
1466 */
1467
1468 rcu_read_lock();
1469 block = find_ram_block(addr);
1470 if (block) {
1471 memset(block->idstr, 0, sizeof(block->idstr));
1472 }
1473 rcu_read_unlock();
1474 }
1475
1476 static int memory_try_enable_merging(void *addr, size_t len)
1477 {
1478 if (!machine_mem_merge(current_machine)) {
1479 /* disabled by the user */
1480 return 0;
1481 }
1482
1483 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1484 }
1485
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 *
 * Returns 0 on success, -EINVAL (and sets @errp) on a disallowed resize.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    /* NOTE(review): find_ram_block() walks the RCU-protected list;
     * confirm callers hold the iothread lock or an RCU read lock. */
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        /* Nothing to do. */
        return 0;
    }

    /* Fixed-size blocks cannot change length at all. */
    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    /* Resizeable blocks may only grow up to their preallocated maximum. */
    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Clear dirty state for the old range, adopt the new length, then
     * mark the whole new used range dirty for all clients. */
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        /* Let the owner react to the new size (update device state). */
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
1531
/* Called with ram_list.mutex held */
/* Grow the per-client dirty-memory block arrays to cover RAM up to
 * @new_ram_size pages, using an RCU copy-publish-reclaim sequence so
 * concurrent readers always see a consistent array. */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            /* The bitmap blocks themselves are shared with the old
             * array; only the pointer array is copied. */
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        /* Fresh zeroed bitmaps for the newly covered range. */
        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        /* Publish the new array before reclaiming the old one. */
        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            /* Freed only after a grace period: readers may still hold it. */
            g_free_rcu(old_blocks, rcu);
        }
    }
}
1572
/* Insert @new_block into the global RAM list: pick an offset, allocate
 * host memory if none was provided, extend dirty tracking, and link the
 * block into the size-sorted list.  On failure, sets @errp and returns
 * with the block not inserted. */
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            /* Under Xen the host mapping is managed by the hypervisor. */
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            /* Allocate anonymous host memory via the pluggable allocator. */
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Grow migration and dirty-memory bookkeeping if total RAM grew. */
    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    /* New RAM starts out fully dirty for every dirty-tracking client. */
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* Guest RAM must not be shared with child processes. */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
1651
1652 #ifdef __linux__
/* Allocate a RAMBlock of @size bytes backed by a file under @mem_path
 * (see file_ram_alloc) and register it.  @share selects MAP_SHARED
 * semantics.  Returns NULL and sets @errp on failure. */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        /* NOTE(review): the file mapping created by file_ram_alloc() is
         * not unmapped here before freeing the block — confirm whether
         * this path can leak the mapping and fd. */
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
1697 #endif
1698
/* Common worker for the qemu_ram_alloc_* entry points.
 *
 * @size:       initial used length (page-aligned here).
 * @max_size:   maximum length; equals @size for fixed-size blocks.
 * @resized:    callback invoked by qemu_ram_resize(), or NULL.
 * @host:       caller-provided host memory (RAM_PREALLOC), or NULL to
 *              allocate in ram_block_add().
 * @resizeable: whether the block may later grow up to @max_size.
 *
 * Returns the new block, or NULL with @errp set on failure. */
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
1734
/* Allocate a fixed-size RAMBlock backed by caller-provided @host memory. */
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}
1740
/* Allocate a fixed-size RAMBlock with freshly allocated host memory. */
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}
1745
/* Allocate a resizeable RAMBlock: starts at @size, may grow up to
 * @maxsz via qemu_ram_resize(); @resized is notified on each resize. */
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
1754
/* Free a block's host memory according to how it was allocated, then
 * the RAMBlock struct itself.  Used as the RCU reclaim callback in
 * qemu_ram_free(). */
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;   /* caller-provided memory; not ours to free */
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        /* File-backed RAM: unmap and close the backing descriptor. */
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
1771
/* Unlink @block from the RAM list and reclaim it after an RCU grace
 * period, so concurrent RCU readers never see freed memory. */
void qemu_ram_free(RAMBlock *block)
{
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    /* Actual teardown deferred to reclaim_ramblock() after grace period. */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
1783
1784 #ifndef _WIN32
/* Re-establish the host mapping of the guest RAM range [addr,
 * addr+length), e.g. after the backing pages were poisoned.  The new
 * mapping must match the way the block was originally allocated
 * (file-backed vs anonymous).  Exits QEMU on failure. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;   /* caller-owned memory: nothing to remap */
            } else if (xen_enabled()) {
                abort();
            } else {
                /* MAP_FIXED: replace the existing mapping in place. */
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    /* File-backed: remap the same file at the same offset. */
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Re-apply madvise settings lost with the old mapping. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
1831 #endif /* !_WIN32 */
1832
1833 int qemu_get_ram_fd(ram_addr_t addr)
1834 {
1835 RAMBlock *block;
1836 int fd;
1837
1838 rcu_read_lock();
1839 block = qemu_get_ram_block(addr);
1840 fd = block->fd;
1841 rcu_read_unlock();
1842 return fd;
1843 }
1844
1845 void qemu_set_ram_fd(ram_addr_t addr, int fd)
1846 {
1847 RAMBlock *block;
1848
1849 rcu_read_lock();
1850 block = qemu_get_ram_block(addr);
1851 block->fd = fd;
1852 rcu_read_unlock();
1853 }
1854
1855 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1856 {
1857 RAMBlock *block;
1858 void *ptr;
1859
1860 rcu_read_lock();
1861 block = qemu_get_ram_block(addr);
1862 ptr = ramblock_ptr(block, 0);
1863 rcu_read_unlock();
1864 return ptr;
1865 }
1866
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * @ram_block may be NULL, in which case the block is looked up by @addr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            /* Transient mapping: do not cache it in block->host. */
            return xen_map_cache(addr, 0, 0);
        }

        /* Map (and lock) the whole block once and remember the pointer. */
        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}
1895
/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument: *size is clamped to what remains of the
 * block past @addr, so the returned pointer is valid for *size bytes.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    /* Never hand out a pointer spanning past the end of the block. */
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}
1930
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        /* Under Xen the mapcache, not the block list, knows the mapping. */
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    /* Fast path: most lookups hit the most-recently-used block. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
1994
1995 /*
1996 * Finds the named RAMBlock
1997 *
1998 * name: The name of RAMBlock to find
1999 *
2000 * Returns: RAMBlock (or NULL if not found)
2001 */
2002 RAMBlock *qemu_ram_block_by_name(const char *name)
2003 {
2004 RAMBlock *block;
2005
2006 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2007 if (!strcmp(name, block->idstr)) {
2008 return block;
2009 }
2010 }
2011
2012 return NULL;
2013 }
2014
2015 /* Some of the softmmu routines need to translate from a host pointer
2016 (typically a TLB entry) back to a ram offset. */
2017 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2018 {
2019 RAMBlock *block;
2020 ram_addr_t offset; /* Not used */
2021
2022 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2023
2024 if (!block) {
2025 return NULL;
2026 }
2027
2028 return block->mr;
2029 }
2030
/* Called within RCU critical section. */
/* Write handler for pages whose code-dirty flag is clean: invalidate any
 * TBs derived from the page before changing it, perform the store, then
 * update dirty state so future writes can take the fast path. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        /* Translated code may exist for this page: throw it away first. */
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
2062
/* The notdirty region only claims write accesses; reads are handled
 * elsewhere (no .read handler is installed in notdirty_mem_ops). */
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}
2068
/* Ops for io_mem_notdirty: intercepts writes to code pages for dirty
 * tracking; accepts write accesses only (see notdirty_mem_accepts). */
static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2074
/* Generate a debug exception if a watchpoint has been hit.
 *
 * @offset: offset of the access within the current page
 * @len:    access length in bytes
 * @attrs:  memory transaction attributes of the access
 * @flags:  BP_MEM_READ or BP_MEM_WRITE, matched against wp->flags
 */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access. */
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                /* Give the CPU class a chance to veto the hit. */
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: leave the CPU loop now. */
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Stop after: regenerate a single-insn TB and resume
                     * so the access completes before the debug exit. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
2126
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    /* Report the (possible) watchpoint hit, then do the real read. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}
2154
/* Write-side counterpart of watch_mem_read: report the watchpoint hit,
 * then forward the store to the underlying address space. */
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}
2178
/* Ops for io_mem_watch: routes accesses through the watchpoint checks
 * before forwarding them to the real memory. */
static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2184
/* Read handler for a subpage container: forward the access to the real
 * backing address space at subpage->base + addr, then reassemble the
 * value from the bounce buffer at the requested width. */
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
2218
/* Write handler for a subpage container: serialize @value into a bounce
 * buffer at the requested width, then forward it to the real backing
 * address space at subpage->base + addr. */
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
2249
/* Delegate access validity for a subpage to the backing address space
 * at the translated address. */
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}
2262
/* Ops for subpage containers: all widths 1-8 are accepted and forwarded
 * to the backing address space by the handlers above. */
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2273
2274 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2275 uint16_t section)
2276 {
2277 int idx, eidx;
2278
2279 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2280 return -1;
2281 idx = SUBPAGE_IDX(start);
2282 eidx = SUBPAGE_IDX(end);
2283 #if defined(DEBUG_SUBPAGE)
2284 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2285 __func__, mmio, start, end, idx, eidx, section);
2286 #endif
2287 for (; idx <= eidx; idx++) {
2288 mmio->sub_section[idx] = section;
2289 }
2290
2291 return 0;
2292 }
2293
/* Allocate and initialise a subpage_t covering one target page at @base
 * in address space @as.  Every slot initially points at the unassigned
 * section; callers install real sections with subpage_register().
 */
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    /* Flag the region so the dispatch code can recognise subpages. */
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
2313
/* Add a section spanning the whole 2^64 address range for @mr to @map and
 * return its section index.  Used by mem_begin() to install the fixed
 * built-in sections (unassigned, notdirty, rom, watch).
 */
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
2328
/* Resolve an IOTLB entry back to its MemoryRegion.  @attrs selects the
 * CPU address space; the bits of @index below TARGET_PAGE_MASK hold the
 * section number in that space's current dispatch table (read under RCU).
 */
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
2338
/* Initialise the four built-in I/O MemoryRegions (ROM writes, unassigned
 * accesses, not-dirty tracking, watchpoints), each covering the full
 * 64-bit range.  Called once from cpu_exec_init_all().
 */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
2349
/* MemoryListener "begin" hook: allocate a fresh AddressSpaceDispatch and
 * pre-populate the fixed sections.  The asserts pin the section indices
 * to the PHYS_SECTION_* constants the dispatch code relies on.
 */
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    /* Start with an empty phys map; region_add callbacks fill it in and
     * mem_commit() publishes the finished dispatch. */
    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
2369
/* RCU reclamation callback: free a retired dispatch table and its
 * section/node storage. */
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
2375
/* MemoryListener "commit" hook: compact the newly built dispatch tree,
 * publish it with atomic_rcu_set(), and reclaim the previous one after a
 * grace period so concurrent RCU readers stay safe.
 */
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
2389
/* Commit hook for the per-CPU TCG listener: refresh the CPU's cached
 * dispatch pointer and flush its TLB so stale translations are dropped.
 */
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
2407
/* Hook an address space into the memory listener machinery so its
 * dispatch table is (re)built on every topology change.  region_nop
 * shares mem_add so unchanged regions are re-added to the new table.
 */
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
2420
/* Detach the dispatch listener; counterpart to
 * address_space_init_dispatch(). */
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
2425
/* Tear down an address space's dispatch table.  The table is unpublished
 * immediately but freed only after an RCU grace period, protecting
 * concurrent readers.
 */
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
2435
/* Create the global "system" memory and 64 KiB "io" regions and their
 * address spaces.  Called once at startup from cpu_exec_init_all().
 */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}
2448
/* Accessor for the global system memory region (set up in
 * memory_map_init()). */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

/* Accessor for the global I/O region (set up in memory_map_init()). */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
2458
2459 #endif /* !defined(CONFIG_USER_ONLY) */
2460
2461 /* physical memory access (slow version, mainly for debug) */
2462 #if defined(CONFIG_USER_ONLY)
2463 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2464 uint8_t *buf, int len, int is_write)
2465 {
2466 int l, flags;
2467 target_ulong page;
2468 void * p;
2469
2470 while (len > 0) {
2471 page = addr & TARGET_PAGE_MASK;
2472 l = (page + TARGET_PAGE_SIZE) - addr;
2473 if (l > len)
2474 l = len;
2475 flags = page_get_flags(page);
2476 if (!(flags & PAGE_VALID))
2477 return -1;
2478 if (is_write) {
2479 if (!(flags & PAGE_WRITE))
2480 return -1;
2481 /* XXX: this code should not depend on lock_user */
2482 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2483 return -1;
2484 memcpy(p, buf, l);
2485 unlock_user(p, addr, l);
2486 } else {
2487 if (!(flags & PAGE_READ))
2488 return -1;
2489 /* XXX: this code should not depend on lock_user */
2490 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2491 return -1;
2492 memcpy(buf, p, l);
2493 unlock_user(p, addr, 0);
2494 }
2495 len -= l;
2496 buf += l;
2497 addr += l;
2498 }
2499 return 0;
2500 }
2501
2502 #else
2503
/* After the emulator has written [addr, addr+length) in RAM region @mr:
 * invalidate any translated blocks derived from that range and record the
 * write in whichever dirty bitmaps still tracked it as clean.
 */
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    /* Code invalidation is handled here; don't set DIRTY_MEMORY_CODE again
     * below. */
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
2522
2523 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2524 {
2525 unsigned access_size_max = mr->ops->valid.max_access_size;
2526
2527 /* Regions are assumed to support 1-4 byte accesses unless
2528 otherwise specified. */
2529 if (access_size_max == 0) {
2530 access_size_max = 4;
2531 }
2532
2533 /* Bound the maximum access by the alignment of the address. */
2534 if (!mr->ops->impl.unaligned) {
2535 unsigned align_size_max = addr & -addr;
2536 if (align_size_max != 0 && align_size_max < access_size_max) {
2537 access_size_max = align_size_max;
2538 }
2539 }
2540
2541 /* Don't attempt accesses larger than the maximum. */
2542 if (l > access_size_max) {
2543 l = access_size_max;
2544 }
2545 l = pow2floor(l);
2546
2547 return l;
2548 }
2549
/* Prepare to dispatch an MMIO access: take the iothread lock if the
 * region requires the global lock and we don't already hold it, and flush
 * any coalesced MMIO writes.  Returns true iff the caller must release
 * the iothread lock when the access completes.
 */
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        /* The flush must run under the iothread lock; take it just for the
         * flush if we didn't need it otherwise. */
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
2572
2573 /* Called within RCU critical section. */
2574 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2575 MemTxAttrs attrs,
2576 const uint8_t *buf,
2577 int len, hwaddr addr1,
2578 hwaddr l, MemoryRegion *mr)
2579 {
2580 uint8_t *ptr;
2581 uint64_t val;
2582 MemTxResult result = MEMTX_OK;
2583 bool release_lock = false;
2584
2585 for (;;) {
2586 if (!memory_access_is_direct(mr, true)) {
2587 release_lock |= prepare_mmio_access(mr);
2588 l = memory_access_size(mr, l, addr1);
2589 /* XXX: could force current_cpu to NULL to avoid
2590 potential bugs */
2591 switch (l) {
2592 case 8:
2593 /* 64 bit write access */
2594 val = ldq_p(buf);
2595 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2596 attrs);
2597 break;
2598 case 4:
2599 /* 32 bit write access */
2600 val = ldl_p(buf);
2601 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2602 attrs);
2603 break;
2604 case 2:
2605 /* 16 bit write access */
2606 val = lduw_p(buf);
2607 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2608 attrs);
2609 break;
2610 case 1:
2611 /* 8 bit write access */
2612 val = ldub_p(buf);
2613 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2614 attrs);
2615 break;
2616 default:
2617 abort();
2618 }
2619 } else {
2620 addr1 += memory_region_get_ram_addr(mr);
2621 /* RAM case */
2622 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
2623 memcpy(ptr, buf, l);
2624 invalidate_and_set_dirty(mr, addr1, l);
2625 }
2626
2627 if (release_lock) {
2628 qemu_mutex_unlock_iothread();
2629 release_lock = false;
2630 }
2631
2632 len -= l;
2633 buf += l;
2634 addr += l;
2635
2636 if (!len) {
2637 break;
2638 }
2639
2640 l = len;
2641 mr = address_space_translate(as, addr, &addr1, &l, true);
2642 }
2643
2644 return result;
2645 }
2646
2647 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2648 const uint8_t *buf, int len)
2649 {
2650 hwaddr l;
2651 hwaddr addr1;
2652 MemoryRegion *mr;
2653 MemTxResult result = MEMTX_OK;
2654
2655 if (len > 0) {
2656 rcu_read_lock();
2657 l = len;
2658 mr = address_space_translate(as, addr, &addr1, &l, true);
2659 result = address_space_write_continue(as, addr, attrs, buf, len,
2660 addr1, l, mr);
2661 rcu_read_unlock();
2662 }
2663
2664 return result;
2665 }
2666
2667 /* Called within RCU critical section. */
2668 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2669 MemTxAttrs attrs, uint8_t *buf,
2670 int len, hwaddr addr1, hwaddr l,
2671 MemoryRegion *mr)
2672 {
2673 uint8_t *ptr;
2674 uint64_t val;
2675 MemTxResult result = MEMTX_OK;
2676 bool release_lock = false;
2677
2678 for (;;) {
2679 if (!memory_access_is_direct(mr, false)) {
2680 /* I/O case */
2681 release_lock |= prepare_mmio_access(mr);
2682 l = memory_access_size(mr, l, addr1);
2683 switch (l) {
2684 case 8:
2685 /* 64 bit read access */
2686 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2687 attrs);
2688 stq_p(buf, val);
2689 break;
2690 case 4:
2691 /* 32 bit read access */
2692 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2693 attrs);
2694 stl_p(buf, val);
2695 break;
2696 case 2:
2697 /* 16 bit read access */
2698 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2699 attrs);
2700 stw_p(buf, val);
2701 break;
2702 case 1:
2703 /* 8 bit read access */
2704 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2705 attrs);
2706 stb_p(buf, val);
2707 break;
2708 default:
2709 abort();
2710 }
2711 } else {
2712 /* RAM case */
2713 ptr = qemu_get_ram_ptr(mr->ram_block,
2714 memory_region_get_ram_addr(mr) + addr1);
2715 memcpy(buf, ptr, l);
2716 }
2717
2718 if (release_lock) {
2719 qemu_mutex_unlock_iothread();
2720 release_lock = false;
2721 }
2722
2723 len -= l;
2724 buf += l;
2725 addr += l;
2726
2727 if (!len) {
2728 break;
2729 }
2730
2731 l = len;
2732 mr = address_space_translate(as, addr, &addr1, &l, false);
2733 }
2734
2735 return result;
2736 }
2737
2738 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2739 MemTxAttrs attrs, uint8_t *buf, int len)
2740 {
2741 hwaddr l;
2742 hwaddr addr1;
2743 MemoryRegion *mr;
2744 MemTxResult result = MEMTX_OK;
2745
2746 if (len > 0) {
2747 rcu_read_lock();
2748 l = len;
2749 mr = address_space_translate(as, addr, &addr1, &l, false);
2750 result = address_space_read_continue(as, addr, attrs, buf, len,
2751 addr1, l, mr);
2752 rcu_read_unlock();
2753 }
2754
2755 return result;
2756 }
2757
2758 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2759 uint8_t *buf, int len, bool is_write)
2760 {
2761 if (is_write) {
2762 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2763 } else {
2764 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2765 }
2766 }
2767
/* Legacy helper: read/write the global system memory address space with
 * unspecified transaction attributes; the MemTxResult is discarded. */
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
2774
/* Operation selector for cpu_physical_memory_write_rom_internal():
 * copy data into RAM/ROM, or only flush the host instruction cache. */
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};
2779
/* Walk [addr, addr+len) in @as and, for each RAM or ROMD fragment, either
 * copy from @buf (WRITE_DATA; also marks the range dirty) or flush the
 * host icache over it (FLUSH_CACHE, @buf may be NULL).  Fragments that
 * are neither RAM nor ROMD are skipped.
 */
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* Not RAM/ROM: just advance past this fragment. */
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
2816
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
2823
/* Flush the host instruction cache for guest range [start, start+len). */
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
2839
/* Single global bounce buffer used by address_space_map() when the target
 * is not directly accessible RAM; in_use serialises access to it. */
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

/* A client waiting (via a bottom half) for the bounce buffer to free up. */
typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* Protects map_client_list. */
QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
2858
/* Remove and free one client; caller holds map_client_list_lock. */
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

/* Schedule every waiting client's bottom half and drain the list; caller
 * holds map_client_list_lock. */
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
2875
/* Register @bh to be scheduled when the bounce buffer becomes available.
 * If the buffer is already free, all waiters (including this one) are
 * notified immediately to avoid a missed wakeup.
 */
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
2888
/* One-time global initialisation of the memory subsystem: RAM list lock,
 * built-in I/O regions, the system/IO address spaces, and the map-client
 * list lock. */
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
2896
/* Remove the waiter registered with @bh, if still pending; no-op
 * otherwise. */
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

/* Wake all map clients; called when the bounce buffer is released. */
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
2917
2918 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2919 {
2920 MemoryRegion *mr;
2921 hwaddr l, xlat;
2922
2923 rcu_read_lock();
2924 while (len > 0) {
2925 l = len;
2926 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2927 if (!memory_access_is_direct(mr, is_write)) {
2928 l = memory_access_size(mr, l, addr);
2929 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2930 return false;
2931 }
2932 }
2933
2934 len -= l;
2935 addr += l;
2936 }
2937 rcu_read_unlock();
2938 return true;
2939 }
2940
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        /* Not direct RAM: fall back to the single global bounce buffer.
         * If it is already taken, fail; the caller can wait via
         * cpu_register_map_client(). */
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Pre-fill the buffer for a read mapping; writes are copied
             * back in address_space_unmap(). */
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    /* Extend the mapping over consecutive fragments as long as they stay
     * in the same region and remain physically contiguous. */
    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    /* Reference dropped in address_space_unmap(). */
    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
3016
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct mapping: mark dirty, drop Xen map-cache entry if any,
         * and release the reference taken in address_space_map(). */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    /* Bounce-buffer mapping: copy written data back, then release the
     * buffer and wake any clients waiting for it. */
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
3049
/* Legacy wrapper: map a range of the global system memory address
 * space.  See address_space_map() for the contract. */
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
3056
3057 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3058 int is_write, hwaddr access_len)
3059 {
3060 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3061 }
3062
/* warning: addr must be aligned */
/* Load a 32-bit value from @as at @addr with the requested endianness.
 * MMIO reads go through the dispatch path (with a byte swap when the
 * device endianness differs from the target's); RAM reads use the
 * endian-specific host load helpers directly.
 */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3121
/* Endianness-specific and legacy (attribute-less) wrappers around
 * address_space_ldl_internal(). */
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3157
/* warning: addr must be aligned */
/* 64-bit analogue of address_space_ldl_internal(): load a quadword with
 * the requested endianness, via MMIO dispatch or direct RAM access. */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3217
/* Endianness-specific and legacy (attribute-less) wrappers around
 * address_space_ldq_internal(). */
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3253
/* XXX: optimize */
/* Load a single byte via the generic rw path (no alignment concerns). */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

/* Legacy attribute-less wrapper. */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3272
/* warning: addr must be aligned */
/* 16-bit analogue of address_space_ldl_internal(): load a halfword with
 * the requested endianness, via MMIO dispatch or direct RAM access. */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3333
/* Endianness-specific and legacy (attribute-less) wrappers around
 * address_space_lduw_internal(). */
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3369
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        /* Set all dirty bits except DIRTY_MEMORY_CODE: deliberately skip
         * TB invalidation (see the header comment). */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

/* Legacy attribute-less wrapper. */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3414
/* warning: addr must be aligned */
/* Store a 32-bit value to @as at @addr with the requested endianness.
 * MMIO writes go through the dispatch path (swapping first when device
 * and target endianness differ); RAM writes use the endian-specific host
 * store helpers and mark the range dirty.
 */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
3471
/* Endianness-specific and legacy (attribute-less) wrappers around
 * address_space_stl_internal(). */
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3507
3508 /* XXX: optimize */
3509 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3510 MemTxAttrs attrs, MemTxResult *result)
3511 {
3512 uint8_t v = val;
3513 MemTxResult r;
3514
3515 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3516 if (result) {
3517 *result = r;
3518 }
3519 }
3520
3521 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3522 {
3523 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3524 }
3525
/*
 * Store a 16-bit value to a physical address, byte-swapping as needed so
 * the value is stored with the endianness requested by @endian.
 *
 * warning: addr must be aligned.
 *
 * @as: address space to write into
 * @addr: physical (guest) address
 * @val: 16-bit value to store (in the low bits of a uint32_t)
 * @attrs: memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: endianness of the stored value (native/little/big)
 */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;       /* bytes requested; translate may shrink it */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* Not directly-accessible RAM (or the mapping is shorter than 2
         * bytes): dispatch to the device model.  prepare_mmio_access()
         * may take the iothread lock; it is released at the end. */
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: write directly through the host pointer. */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Invalidate cached translations and mark the range dirty. */
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
3581
3582 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3583 MemTxAttrs attrs, MemTxResult *result)
3584 {
3585 address_space_stw_internal(as, addr, val, attrs, result,
3586 DEVICE_NATIVE_ENDIAN);
3587 }
3588
3589 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3590 MemTxAttrs attrs, MemTxResult *result)
3591 {
3592 address_space_stw_internal(as, addr, val, attrs, result,
3593 DEVICE_LITTLE_ENDIAN);
3594 }
3595
3596 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3597 MemTxAttrs attrs, MemTxResult *result)
3598 {
3599 address_space_stw_internal(as, addr, val, attrs, result,
3600 DEVICE_BIG_ENDIAN);
3601 }
3602
3603 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3604 {
3605 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3606 }
3607
3608 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3609 {
3610 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3611 }
3612
3613 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3614 {
3615 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3616 }
3617
3618 /* XXX: optimize */
3619 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3620 MemTxAttrs attrs, MemTxResult *result)
3621 {
3622 MemTxResult r;
3623 val = tswap64(val);
3624 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3625 if (result) {
3626 *result = r;
3627 }
3628 }
3629
3630 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3631 MemTxAttrs attrs, MemTxResult *result)
3632 {
3633 MemTxResult r;
3634 val = cpu_to_le64(val);
3635 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3636 if (result) {
3637 *result = r;
3638 }
3639 }
3640 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3641 MemTxAttrs attrs, MemTxResult *result)
3642 {
3643 MemTxResult r;
3644 val = cpu_to_be64(val);
3645 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3646 if (result) {
3647 *result = r;
3648 }
3649 }
3650
3651 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3652 {
3653 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3654 }
3655
3656 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3657 {
3658 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3659 }
3660
3661 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3662 {
3663 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3664 }
3665
3666 /* virtual memory access for debug (includes writing to ROM) */
3667 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3668 uint8_t *buf, int len, int is_write)
3669 {
3670 int l;
3671 hwaddr phys_addr;
3672 target_ulong page;
3673
3674 while (len > 0) {
3675 int asidx;
3676 MemTxAttrs attrs;
3677
3678 page = addr & TARGET_PAGE_MASK;
3679 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3680 asidx = cpu_asidx_from_attrs(cpu, attrs);
3681 /* if no physical page mapped, return an error */
3682 if (phys_addr == -1)
3683 return -1;
3684 l = (page + TARGET_PAGE_SIZE) - addr;
3685 if (l > len)
3686 l = len;
3687 phys_addr += (addr & ~TARGET_PAGE_MASK);
3688 if (is_write) {
3689 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3690 phys_addr, buf, l);
3691 } else {
3692 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3693 MEMTXATTRS_UNSPECIFIED,
3694 buf, l, 0);
3695 }
3696 len -= l;
3697 buf += l;
3698 addr += l;
3699 }
3700 return 0;
3701 }
3702
3703 /*
3704 * Allows code that needs to deal with migration bitmaps etc to still be built
3705 * target independent.
3706 */
3707 size_t qemu_target_page_bits(void)
3708 {
3709 return TARGET_PAGE_BITS;
3710 }
3711
3712 #endif
3713
/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
3727
3728 #ifndef CONFIG_USER_ONLY
3729 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3730 {
3731 MemoryRegion*mr;
3732 hwaddr l = 1;
3733 bool res;
3734
3735 rcu_read_lock();
3736 mr = address_space_translate(&address_space_memory,
3737 phys_addr, &phys_addr, &l, false);
3738
3739 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3740 rcu_read_unlock();
3741 return res;
3742 }
3743
3744 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3745 {
3746 RAMBlock *block;
3747 int ret = 0;
3748
3749 rcu_read_lock();
3750 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3751 ret = func(block->idstr, block->host, block->offset,
3752 block->used_length, opaque);
3753 if (ret) {
3754 break;
3755 }
3756 }
3757 rcu_read_unlock();
3758 return ret;
3759 }
3760 #endif