1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu/cutils.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "tcg.h"
29 #include "hw/qdev-core.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/boards.h"
32 #include "hw/xen/xen.h"
33 #endif
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include "qemu.h"
41 #else /* !CONFIG_USER_ONLY */
42 #include "hw/hw.h"
43 #include "exec/memory.h"
44 #include "exec/ioport.h"
45 #include "sysemu/dma.h"
46 #include "exec/address-spaces.h"
47 #include "sysemu/xen-mapcache.h"
48 #include "trace.h"
49 #endif
50 #include "exec/cpu-all.h"
51 #include "qemu/rcu_queue.h"
52 #include "qemu/main-loop.h"
53 #include "translate-all.h"
54 #include "sysemu/replay.h"
55
56 #include "exec/memory-internal.h"
57 #include "exec/ram_addr.h"
58 #include "exec/log.h"
59
60 #include "qemu/range.h"
61 #ifndef _WIN32
62 #include "qemu/mmap-alloc.h"
63 #endif
64
65 //#define DEBUG_SUBPAGE
66
67 #if !defined(CONFIG_USER_ONLY)
68 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
69 * are protected by the ramlist lock.
70 */
71 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
72
73 static MemoryRegion *system_memory;
74 static MemoryRegion *system_io;
75
76 AddressSpace address_space_io;
77 AddressSpace address_space_memory;
78
79 MemoryRegion io_mem_rom, io_mem_notdirty;
80 static MemoryRegion io_mem_unassigned;
81
82 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
83 #define RAM_PREALLOC (1 << 0)
84
85 /* RAM is mmap-ed with MAP_SHARED */
86 #define RAM_SHARED (1 << 1)
87
88 /* Only a portion of RAM (used_length) is actually used and migrated.
89 * This used_length size can change across reboots.
90 */
91 #define RAM_RESIZEABLE (1 << 2)
92
93 #endif
94
95 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 __thread CPUState *current_cpu;
99 /* 0 = Do not count executed instructions.
100 1 = Precise instruction counting.
101 2 = Adaptive rate instruction counting. */
102 int use_icount;
103
104 #if !defined(CONFIG_USER_ONLY)
105
106 typedef struct PhysPageEntry PhysPageEntry;
107
108 struct PhysPageEntry {
109 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
110 uint32_t skip : 6;
111 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
112 uint32_t ptr : 26;
113 };
114
115 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
116
117 /* Size of the L2 (and L3, etc) page tables. */
118 #define ADDR_SPACE_BITS 64
119
120 #define P_L2_BITS 9
121 #define P_L2_SIZE (1 << P_L2_BITS)
122
123 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
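/*
 * Illustrative arithmetic (added sketch, not from the original source):
 * assuming TARGET_PAGE_BITS == 12, the page-index space is 64 - 12 = 52 bits,
 * so P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6. Each level resolves
 * P_L2_BITS == 9 bits of the index, and 6 * 9 = 54 >= 52, so six levels of
 * 512-entry nodes are enough to reach a leaf MemoryRegionSection.
 */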
124
125 typedef PhysPageEntry Node[P_L2_SIZE];
126
127 typedef struct PhysPageMap {
128 struct rcu_head rcu;
129
130 unsigned sections_nb;
131 unsigned sections_nb_alloc;
132 unsigned nodes_nb;
133 unsigned nodes_nb_alloc;
134 Node *nodes;
135 MemoryRegionSection *sections;
136 } PhysPageMap;
137
138 struct AddressSpaceDispatch {
139 struct rcu_head rcu;
140
141 MemoryRegionSection *mru_section;
142 /* This is a multi-level map on the physical address space.
143 * The bottom level has pointers to MemoryRegionSections.
144 */
145 PhysPageEntry phys_map;
146 PhysPageMap map;
147 AddressSpace *as;
148 };
149
150 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
151 typedef struct subpage_t {
152 MemoryRegion iomem;
153 AddressSpace *as;
154 hwaddr base;
155 uint16_t sub_section[TARGET_PAGE_SIZE];
156 } subpage_t;
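/*
 * Added note (illustrative, assuming a 4 KiB TARGET_PAGE_SIZE): a subpage_t
 * splits one guest page into byte-granularity sub-sections. For an access at
 * address 0x...1234, SUBPAGE_IDX() yields 0x234, and sub_section[0x234] holds
 * the index of the MemoryRegionSection covering that byte.
 */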
157
158 #define PHYS_SECTION_UNASSIGNED 0
159 #define PHYS_SECTION_NOTDIRTY 1
160 #define PHYS_SECTION_ROM 2
161 #define PHYS_SECTION_WATCH 3
162
163 static void io_mem_init(void);
164 static void memory_map_init(void);
165 static void tcg_commit(MemoryListener *listener);
166
167 static MemoryRegion io_mem_watch;
168
169 /**
170 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
171 * @cpu: the CPU whose AddressSpace this is
172 * @as: the AddressSpace itself
173 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
174 * @tcg_as_listener: listener for tracking changes to the AddressSpace
175 */
176 struct CPUAddressSpace {
177 CPUState *cpu;
178 AddressSpace *as;
179 struct AddressSpaceDispatch *memory_dispatch;
180 MemoryListener tcg_as_listener;
181 };
182
183 #endif
184
185 #if !defined(CONFIG_USER_ONLY)
186
187 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
188 {
189 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
190 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
191 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
192 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
193 }
194 }
195
196 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
197 {
198 unsigned i;
199 uint32_t ret;
200 PhysPageEntry e;
201 PhysPageEntry *p;
202
203 ret = map->nodes_nb++;
204 p = map->nodes[ret];
205 assert(ret != PHYS_MAP_NODE_NIL);
206 assert(ret != map->nodes_nb_alloc);
207
208 e.skip = leaf ? 0 : 1;
209 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
210 for (i = 0; i < P_L2_SIZE; ++i) {
211 memcpy(&p[i], &e, sizeof(e));
212 }
213 return ret;
214 }
215
216 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
217 hwaddr *index, hwaddr *nb, uint16_t leaf,
218 int level)
219 {
220 PhysPageEntry *p;
221 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
222
223 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
224 lp->ptr = phys_map_node_alloc(map, level == 0);
225 }
226 p = map->nodes[lp->ptr];
227 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
228
229 while (*nb && lp < &p[P_L2_SIZE]) {
230 if ((*index & (step - 1)) == 0 && *nb >= step) {
231 lp->skip = 0;
232 lp->ptr = leaf;
233 *index += step;
234 *nb -= step;
235 } else {
236 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
237 }
238 ++lp;
239 }
240 }
241
242 static void phys_page_set(AddressSpaceDispatch *d,
243 hwaddr index, hwaddr nb,
244 uint16_t leaf)
245 {
246 /* Wildly overreserve - it doesn't matter much. */
247 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
248
249 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
250 }
251
252 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
253 * and update our entry so we can skip it and go directly to the destination.
254 */
255 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
256 {
257 unsigned valid_ptr = P_L2_SIZE;
258 int valid = 0;
259 PhysPageEntry *p;
260 int i;
261
262 if (lp->ptr == PHYS_MAP_NODE_NIL) {
263 return;
264 }
265
266 p = nodes[lp->ptr];
267 for (i = 0; i < P_L2_SIZE; i++) {
268 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
269 continue;
270 }
271
272 valid_ptr = i;
273 valid++;
274 if (p[i].skip) {
275 phys_page_compact(&p[i], nodes, compacted);
276 }
277 }
278
279 /* We can only compress if there's only one child. */
280 if (valid != 1) {
281 return;
282 }
283
284 assert(valid_ptr < P_L2_SIZE);
285
286 /* Don't compress if it won't fit in the # of bits we have. */
287 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
288 return;
289 }
290
291 lp->ptr = p[valid_ptr].ptr;
292 if (!p[valid_ptr].skip) {
293 /* If our only child is a leaf, make this a leaf. */
294 /* By design, we should have made this node a leaf to begin with so we
295 * should never reach here.
296 * But since it's so simple to handle this, let's do it just in case we
297 * change this rule.
298 */
299 lp->skip = 0;
300 } else {
301 lp->skip += p[valid_ptr].skip;
302 }
303 }
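/*
 * Illustrative example of the compaction above (added comment): if a node at
 * skip level 1 has exactly one populated child, which itself skips 2 levels,
 * the parent entry is rewritten to point directly at the grandchild with
 * skip = 1 + 2 = 3, so a lookup follows one pointer instead of two. Chains
 * whose combined skip would not fit in the available bits are left
 * uncompressed (see the check above).
 */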
304
305 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
306 {
307 DECLARE_BITMAP(compacted, nodes_nb);
308
309 if (d->phys_map.skip) {
310 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
311 }
312 }
313
314 static inline bool section_covers_addr(const MemoryRegionSection *section,
315 hwaddr addr)
316 {
317 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
318 * the section must cover the entire address space.
319 */
320 return section->size.hi ||
321 range_covers_byte(section->offset_within_address_space,
322 section->size.lo, addr);
323 }
324
325 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
326 Node *nodes, MemoryRegionSection *sections)
327 {
328 PhysPageEntry *p;
329 hwaddr index = addr >> TARGET_PAGE_BITS;
330 int i;
331
332 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
333 if (lp.ptr == PHYS_MAP_NODE_NIL) {
334 return &sections[PHYS_SECTION_UNASSIGNED];
335 }
336 p = nodes[lp.ptr];
337 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
338 }
339
340 if (section_covers_addr(&sections[lp.ptr], addr)) {
341 return &sections[lp.ptr];
342 } else {
343 return &sections[PHYS_SECTION_UNASSIGNED];
344 }
345 }
346
347 bool memory_region_is_unassigned(MemoryRegion *mr)
348 {
349 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
350 && mr != &io_mem_watch;
351 }
352
353 /* Called from RCU critical section */
354 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
355 hwaddr addr,
356 bool resolve_subpage)
357 {
358 MemoryRegionSection *section = atomic_read(&d->mru_section);
359 subpage_t *subpage;
360 bool update;
361
362 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
363 section_covers_addr(section, addr)) {
364 update = false;
365 } else {
366 section = phys_page_find(d->phys_map, addr, d->map.nodes,
367 d->map.sections);
368 update = true;
369 }
370 if (resolve_subpage && section->mr->subpage) {
371 subpage = container_of(section->mr, subpage_t, iomem);
372 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
373 }
374 if (update) {
375 atomic_set(&d->mru_section, section);
376 }
377 return section;
378 }
379
380 /* Called from RCU critical section */
381 static MemoryRegionSection *
382 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
383 hwaddr *plen, bool resolve_subpage)
384 {
385 MemoryRegionSection *section;
386 MemoryRegion *mr;
387 Int128 diff;
388
389 section = address_space_lookup_region(d, addr, resolve_subpage);
390 /* Compute offset within MemoryRegionSection */
391 addr -= section->offset_within_address_space;
392
393 /* Compute offset within MemoryRegion */
394 *xlat = addr + section->offset_within_region;
395
396 mr = section->mr;
397
398 /* MMIO registers can be expected to perform full-width accesses based only
399 * on their address, without considering adjacent registers that could
400 * decode to completely different MemoryRegions. When such registers
401 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
402 * regions overlap wildly. For this reason we cannot clamp the accesses
403 * here.
404 *
405 * If the length is small (as is the case for address_space_ldl/stl),
406 * everything works fine. If the incoming length is large, however,
407 * the caller really has to do the clamping through memory_access_size.
408 */
409 if (memory_region_is_ram(mr)) {
410 diff = int128_sub(section->size, int128_make64(addr));
411 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
412 }
413 return section;
414 }
415
416 /* Called from RCU critical section */
417 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
418 hwaddr *xlat, hwaddr *plen,
419 bool is_write)
420 {
421 IOMMUTLBEntry iotlb;
422 MemoryRegionSection *section;
423 MemoryRegion *mr;
424
425 for (;;) {
426 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
427 section = address_space_translate_internal(d, addr, &addr, plen, true);
428 mr = section->mr;
429
430 if (!mr->iommu_ops) {
431 break;
432 }
433
434 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
435 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
436 | (addr & iotlb.addr_mask));
437 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
438 if (!(iotlb.perm & (1 << is_write))) {
439 mr = &io_mem_unassigned;
440 break;
441 }
442
443 as = iotlb.target_as;
444 }
445
446 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
447 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
448 *plen = MIN(page, *plen);
449 }
450
451 *xlat = addr;
452 return mr;
453 }
454
455 /* Called from RCU critical section */
456 MemoryRegionSection *
457 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
458 hwaddr *xlat, hwaddr *plen)
459 {
460 MemoryRegionSection *section;
461 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
462
463 section = address_space_translate_internal(d, addr, xlat, plen, false);
464
465 assert(!section->mr->iommu_ops);
466 return section;
467 }
468 #endif
469
470 #if !defined(CONFIG_USER_ONLY)
471
472 static int cpu_common_post_load(void *opaque, int version_id)
473 {
474 CPUState *cpu = opaque;
475
476 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
477 version_id is increased. */
478 cpu->interrupt_request &= ~0x01;
479 tlb_flush(cpu, 1);
480
481 return 0;
482 }
483
484 static int cpu_common_pre_load(void *opaque)
485 {
486 CPUState *cpu = opaque;
487
488 cpu->exception_index = -1;
489
490 return 0;
491 }
492
493 static bool cpu_common_exception_index_needed(void *opaque)
494 {
495 CPUState *cpu = opaque;
496
497 return tcg_enabled() && cpu->exception_index != -1;
498 }
499
500 static const VMStateDescription vmstate_cpu_common_exception_index = {
501 .name = "cpu_common/exception_index",
502 .version_id = 1,
503 .minimum_version_id = 1,
504 .needed = cpu_common_exception_index_needed,
505 .fields = (VMStateField[]) {
506 VMSTATE_INT32(exception_index, CPUState),
507 VMSTATE_END_OF_LIST()
508 }
509 };
510
511 static bool cpu_common_crash_occurred_needed(void *opaque)
512 {
513 CPUState *cpu = opaque;
514
515 return cpu->crash_occurred;
516 }
517
518 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
519 .name = "cpu_common/crash_occurred",
520 .version_id = 1,
521 .minimum_version_id = 1,
522 .needed = cpu_common_crash_occurred_needed,
523 .fields = (VMStateField[]) {
524 VMSTATE_BOOL(crash_occurred, CPUState),
525 VMSTATE_END_OF_LIST()
526 }
527 };
528
529 const VMStateDescription vmstate_cpu_common = {
530 .name = "cpu_common",
531 .version_id = 1,
532 .minimum_version_id = 1,
533 .pre_load = cpu_common_pre_load,
534 .post_load = cpu_common_post_load,
535 .fields = (VMStateField[]) {
536 VMSTATE_UINT32(halted, CPUState),
537 VMSTATE_UINT32(interrupt_request, CPUState),
538 VMSTATE_END_OF_LIST()
539 },
540 .subsections = (const VMStateDescription*[]) {
541 &vmstate_cpu_common_exception_index,
542 &vmstate_cpu_common_crash_occurred,
543 NULL
544 }
545 };
546
547 #endif
548
549 CPUState *qemu_get_cpu(int index)
550 {
551 CPUState *cpu;
552
553 CPU_FOREACH(cpu) {
554 if (cpu->cpu_index == index) {
555 return cpu;
556 }
557 }
558
559 return NULL;
560 }
561
562 #if !defined(CONFIG_USER_ONLY)
563 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
564 {
565 CPUAddressSpace *newas;
566
567 /* Target code should have set num_ases before calling us */
568 assert(asidx < cpu->num_ases);
569
570 if (asidx == 0) {
571 /* address space 0 gets the convenience alias */
572 cpu->as = as;
573 }
574
575 /* KVM cannot currently support multiple address spaces. */
576 assert(asidx == 0 || !kvm_enabled());
577
578 if (!cpu->cpu_ases) {
579 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
580 }
581
582 newas = &cpu->cpu_ases[asidx];
583 newas->cpu = cpu;
584 newas->as = as;
585 if (tcg_enabled()) {
586 newas->tcg_as_listener.commit = tcg_commit;
587 memory_listener_register(&newas->tcg_as_listener, as);
588 }
589 }
590
591 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
592 {
593 /* Return the AddressSpace corresponding to the specified index */
594 return cpu->cpu_ases[asidx].as;
595 }
596 #endif
597
598 #ifndef CONFIG_USER_ONLY
599 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
600
601 static int cpu_get_free_index(Error **errp)
602 {
603 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
604
605 if (cpu >= MAX_CPUMASK_BITS) {
606 error_setg(errp, "Trying to use more CPUs than max of %d",
607 MAX_CPUMASK_BITS);
608 return -1;
609 }
610
611 bitmap_set(cpu_index_map, cpu, 1);
612 return cpu;
613 }
614
615 void cpu_exec_exit(CPUState *cpu)
616 {
617 if (cpu->cpu_index == -1) {
618 /* cpu_index was never allocated by this @cpu or was already freed. */
619 return;
620 }
621
622 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
623 cpu->cpu_index = -1;
624 }
625 #else
626
627 static int cpu_get_free_index(Error **errp)
628 {
629 CPUState *some_cpu;
630 int cpu_index = 0;
631
632 CPU_FOREACH(some_cpu) {
633 cpu_index++;
634 }
635 return cpu_index;
636 }
637
638 void cpu_exec_exit(CPUState *cpu)
639 {
640 }
641 #endif
642
643 void cpu_exec_init(CPUState *cpu, Error **errp)
644 {
645 CPUClass *cc = CPU_GET_CLASS(cpu);
646 Error *local_err = NULL;
647
648 cpu->as = NULL;
649 cpu->num_ases = 0;
650
651 #ifndef CONFIG_USER_ONLY
652 cpu->thread_id = qemu_get_thread_id();
653
654 /* This is a softmmu CPU object, so create a property for it
655 * so users can wire up its memory. (This can't go in qom/cpu.c
656 * because that file is compiled only once for both user-mode
657 * and system builds.) The default if no link is set up is to use
658 * the system address space.
659 */
660 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
661 (Object **)&cpu->memory,
662 qdev_prop_allow_set_link_before_realize,
663 OBJ_PROP_LINK_UNREF_ON_RELEASE,
664 &error_abort);
665 cpu->memory = system_memory;
666 object_ref(OBJECT(cpu->memory));
667 #endif
668
669 #if defined(CONFIG_USER_ONLY)
670 cpu_list_lock();
671 #endif
672 cpu->cpu_index = cpu_get_free_index(&local_err);
673 if (local_err) {
674 error_propagate(errp, local_err);
675 #if defined(CONFIG_USER_ONLY)
676 cpu_list_unlock();
677 #endif
678 return;
679 }
680 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
681 #if defined(CONFIG_USER_ONLY)
682 (void) cc;
683 cpu_list_unlock();
684 #else
685 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
686 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
687 }
688 if (cc->vmsd != NULL) {
689 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
690 }
691 #endif
692 }
693
694 #if defined(CONFIG_USER_ONLY)
695 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
696 {
697 tb_invalidate_phys_page_range(pc, pc + 1, 0);
698 }
699 #else
700 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
701 {
702 MemTxAttrs attrs;
703 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
704 int asidx = cpu_asidx_from_attrs(cpu, attrs);
705 if (phys != -1) {
706 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
707 phys | (pc & ~TARGET_PAGE_MASK));
708 }
709 }
710 #endif
711
712 #if defined(CONFIG_USER_ONLY)
713 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
714
715 {
716 }
717
718 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
719 int flags)
720 {
721 return -ENOSYS;
722 }
723
724 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
725 {
726 }
727
728 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
729 int flags, CPUWatchpoint **watchpoint)
730 {
731 return -ENOSYS;
732 }
733 #else
734 /* Add a watchpoint. */
735 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
736 int flags, CPUWatchpoint **watchpoint)
737 {
738 CPUWatchpoint *wp;
739
740 /* forbid ranges which are empty or run off the end of the address space */
741 if (len == 0 || (addr + len - 1) < addr) {
742 error_report("tried to set invalid watchpoint at %"
743 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
744 return -EINVAL;
745 }
746 wp = g_malloc(sizeof(*wp));
747
748 wp->vaddr = addr;
749 wp->len = len;
750 wp->flags = flags;
751
752 /* keep all GDB-injected watchpoints in front */
753 if (flags & BP_GDB) {
754 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
755 } else {
756 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
757 }
758
759 tlb_flush_page(cpu, addr);
760
761 if (watchpoint)
762 *watchpoint = wp;
763 return 0;
764 }
765
766 /* Remove a specific watchpoint. */
767 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
768 int flags)
769 {
770 CPUWatchpoint *wp;
771
772 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
773 if (addr == wp->vaddr && len == wp->len
774 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
775 cpu_watchpoint_remove_by_ref(cpu, wp);
776 return 0;
777 }
778 }
779 return -ENOENT;
780 }
781
782 /* Remove a specific watchpoint by reference. */
783 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
784 {
785 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
786
787 tlb_flush_page(cpu, watchpoint->vaddr);
788
789 g_free(watchpoint);
790 }
791
792 /* Remove all matching watchpoints. */
793 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
794 {
795 CPUWatchpoint *wp, *next;
796
797 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
798 if (wp->flags & mask) {
799 cpu_watchpoint_remove_by_ref(cpu, wp);
800 }
801 }
802 }
803
804 /* Return true if this watchpoint address matches the specified
805 * access (ie the address range covered by the watchpoint overlaps
806 * partially or completely with the address range covered by the
807 * access).
808 */
809 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
810 vaddr addr,
811 vaddr len)
812 {
813 /* We know the lengths are non-zero, but a little caution is
814 * required to avoid errors in the case where the range ends
815 * exactly at the top of the address space and so addr + len
816 * wraps round to zero.
817 */
818 vaddr wpend = wp->vaddr + wp->len - 1;
819 vaddr addrend = addr + len - 1;
820
821 return !(addr > wpend || wp->vaddr > addrend);
822 }
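/*
 * Worked example of the wrap-safe overlap test above (added sketch, assuming
 * a 64-bit vaddr): with a watchpoint at vaddr = 0xfffffffffffffff8, len = 8,
 * wpend is 0xffffffffffffffff. A 4-byte access at 0xfffffffffffffffc gives
 * addrend = 0xffffffffffffffff, so neither "addr > wpend" nor
 * "wp->vaddr > addrend" holds and the ranges overlap. Computing addr + len
 * directly would wrap to 0 and break a naive end-exclusive comparison.
 */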
823
824 #endif
825
826 /* Add a breakpoint. */
827 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
828 CPUBreakpoint **breakpoint)
829 {
830 CPUBreakpoint *bp;
831
832 bp = g_malloc(sizeof(*bp));
833
834 bp->pc = pc;
835 bp->flags = flags;
836
837 /* keep all GDB-injected breakpoints in front */
838 if (flags & BP_GDB) {
839 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
840 } else {
841 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
842 }
843
844 breakpoint_invalidate(cpu, pc);
845
846 if (breakpoint) {
847 *breakpoint = bp;
848 }
849 return 0;
850 }
851
852 /* Remove a specific breakpoint. */
853 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
854 {
855 CPUBreakpoint *bp;
856
857 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
858 if (bp->pc == pc && bp->flags == flags) {
859 cpu_breakpoint_remove_by_ref(cpu, bp);
860 return 0;
861 }
862 }
863 return -ENOENT;
864 }
865
866 /* Remove a specific breakpoint by reference. */
867 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
868 {
869 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
870
871 breakpoint_invalidate(cpu, breakpoint->pc);
872
873 g_free(breakpoint);
874 }
875
876 /* Remove all matching breakpoints. */
877 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
878 {
879 CPUBreakpoint *bp, *next;
880
881 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
882 if (bp->flags & mask) {
883 cpu_breakpoint_remove_by_ref(cpu, bp);
884 }
885 }
886 }
887
888 /* enable or disable single step mode. EXCP_DEBUG is returned by the
889 CPU loop after each instruction */
890 void cpu_single_step(CPUState *cpu, int enabled)
891 {
892 if (cpu->singlestep_enabled != enabled) {
893 cpu->singlestep_enabled = enabled;
894 if (kvm_enabled()) {
895 kvm_update_guest_debug(cpu, 0);
896 } else {
897 /* must flush all the translated code to avoid inconsistencies */
898 /* XXX: only flush what is necessary */
899 tb_flush(cpu);
900 }
901 }
902 }
903
904 void cpu_abort(CPUState *cpu, const char *fmt, ...)
905 {
906 va_list ap;
907 va_list ap2;
908
909 va_start(ap, fmt);
910 va_copy(ap2, ap);
911 fprintf(stderr, "qemu: fatal: ");
912 vfprintf(stderr, fmt, ap);
913 fprintf(stderr, "\n");
914 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
915 if (qemu_log_separate()) {
916 qemu_log("qemu: fatal: ");
917 qemu_log_vprintf(fmt, ap2);
918 qemu_log("\n");
919 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
920 qemu_log_flush();
921 qemu_log_close();
922 }
923 va_end(ap2);
924 va_end(ap);
925 replay_finish();
926 #if defined(CONFIG_USER_ONLY)
927 {
928 struct sigaction act;
929 sigfillset(&act.sa_mask);
930 act.sa_handler = SIG_DFL;
931 sigaction(SIGABRT, &act, NULL);
932 }
933 #endif
934 abort();
935 }
936
937 #if !defined(CONFIG_USER_ONLY)
938 /* Called from RCU critical section */
939 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
940 {
941 RAMBlock *block;
942
943 block = atomic_rcu_read(&ram_list.mru_block);
944 if (block && addr - block->offset < block->max_length) {
945 return block;
946 }
947 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
948 if (addr - block->offset < block->max_length) {
949 goto found;
950 }
951 }
952
953 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
954 abort();
955
956 found:
957 /* It is safe to write mru_block outside the iothread lock. This
958 * is what happens:
959 *
960 * mru_block = xxx
961 * rcu_read_unlock()
962 * xxx removed from list
963 * rcu_read_lock()
964 * read mru_block
965 * mru_block = NULL;
966 * call_rcu(reclaim_ramblock, xxx);
967 * rcu_read_unlock()
968 *
969 * atomic_rcu_set is not needed here. The block was already published
970 * when it was placed into the list. Here we're just making an extra
971 * copy of the pointer.
972 */
973 ram_list.mru_block = block;
974 return block;
975 }
976
977 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
978 {
979 CPUState *cpu;
980 ram_addr_t start1;
981 RAMBlock *block;
982 ram_addr_t end;
983
984 end = TARGET_PAGE_ALIGN(start + length);
985 start &= TARGET_PAGE_MASK;
986
987 rcu_read_lock();
988 block = qemu_get_ram_block(start);
989 assert(block == qemu_get_ram_block(end - 1));
990 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
991 CPU_FOREACH(cpu) {
992 tlb_reset_dirty(cpu, start1, length);
993 }
994 rcu_read_unlock();
995 }
996
997 /* Note: start and end must be within the same ram block. */
998 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
999 ram_addr_t length,
1000 unsigned client)
1001 {
1002 DirtyMemoryBlocks *blocks;
1003 unsigned long end, page;
1004 bool dirty = false;
1005
1006 if (length == 0) {
1007 return false;
1008 }
1009
1010 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1011 page = start >> TARGET_PAGE_BITS;
1012
1013 rcu_read_lock();
1014
1015 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1016
1017 while (page < end) {
1018 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1019 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1020 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1021
1022 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1023 offset, num);
1024 page += num;
1025 }
1026
1027 rcu_read_unlock();
1028
1029 if (dirty && tcg_enabled()) {
1030 tlb_reset_dirty_range_all(start, length);
1031 }
1032
1033 return dirty;
1034 }
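/*
 * Added sketch of the block/offset arithmetic above, assuming
 * TARGET_PAGE_BITS == 12 and writing B for DIRTY_MEMORY_BLOCK_SIZE (defined
 * in exec/ram_addr.h): a range starting at 0x1234000 gives page = 0x1234,
 * idx = 0x1234 / B selects the bitmap chunk and offset = 0x1234 % B the bit
 * within it; "num" then covers at most the remainder of that chunk, so a
 * single bitmap_test_and_clear_atomic() call never straddles two chunks.
 */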
1035
1036 /* Called from RCU critical section */
1037 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1038 MemoryRegionSection *section,
1039 target_ulong vaddr,
1040 hwaddr paddr, hwaddr xlat,
1041 int prot,
1042 target_ulong *address)
1043 {
1044 hwaddr iotlb;
1045 CPUWatchpoint *wp;
1046
1047 if (memory_region_is_ram(section->mr)) {
1048 /* Normal RAM. */
1049 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1050 + xlat;
1051 if (!section->readonly) {
1052 iotlb |= PHYS_SECTION_NOTDIRTY;
1053 } else {
1054 iotlb |= PHYS_SECTION_ROM;
1055 }
1056 } else {
1057 AddressSpaceDispatch *d;
1058
1059 d = atomic_rcu_read(&section->address_space->dispatch);
1060 iotlb = section - d->map.sections;
1061 iotlb += xlat;
1062 }
1063
1064 /* Make accesses to pages with watchpoints go via the
1065 watchpoint trap routines. */
1066 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1067 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1068 /* Avoid trapping reads of pages with a write breakpoint. */
1069 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1070 iotlb = PHYS_SECTION_WATCH + paddr;
1071 *address |= TLB_MMIO;
1072 break;
1073 }
1074 }
1075 }
1076
1077 return iotlb;
1078 }
1079 #endif /* defined(CONFIG_USER_ONLY) */
1080
1081 #if !defined(CONFIG_USER_ONLY)
1082
1083 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1084 uint16_t section);
1085 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1086
1087 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1088 qemu_anon_ram_alloc;
1089
1090 /*
1091 * Set a custom physical guest memory allocator.
1092 * Accelerators with unusual needs may need this. Hopefully, we can
1093 * get rid of it eventually.
1094 */
1095 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1096 {
1097 phys_mem_alloc = alloc;
1098 }
1099
1100 static uint16_t phys_section_add(PhysPageMap *map,
1101 MemoryRegionSection *section)
1102 {
1103 /* The physical section number is ORed with a page-aligned
1104 * pointer to produce the iotlb entries. Thus it should
1105 * never overflow into the page-aligned value.
1106 */
1107 assert(map->sections_nb < TARGET_PAGE_SIZE);
1108
1109 if (map->sections_nb == map->sections_nb_alloc) {
1110 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1111 map->sections = g_renew(MemoryRegionSection, map->sections,
1112 map->sections_nb_alloc);
1113 }
1114 map->sections[map->sections_nb] = *section;
1115 memory_region_ref(section->mr);
1116 return map->sections_nb++;
1117 }
1118
1119 static void phys_section_destroy(MemoryRegion *mr)
1120 {
1121 bool have_sub_page = mr->subpage;
1122
1123 memory_region_unref(mr);
1124
1125 if (have_sub_page) {
1126 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1127 object_unref(OBJECT(&subpage->iomem));
1128 g_free(subpage);
1129 }
1130 }
1131
1132 static void phys_sections_free(PhysPageMap *map)
1133 {
1134 while (map->sections_nb > 0) {
1135 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1136 phys_section_destroy(section->mr);
1137 }
1138 g_free(map->sections);
1139 g_free(map->nodes);
1140 }
1141
1142 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1143 {
1144 subpage_t *subpage;
1145 hwaddr base = section->offset_within_address_space
1146 & TARGET_PAGE_MASK;
1147 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1148 d->map.nodes, d->map.sections);
1149 MemoryRegionSection subsection = {
1150 .offset_within_address_space = base,
1151 .size = int128_make64(TARGET_PAGE_SIZE),
1152 };
1153 hwaddr start, end;
1154
1155 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1156
1157 if (!(existing->mr->subpage)) {
1158 subpage = subpage_init(d->as, base);
1159 subsection.address_space = d->as;
1160 subsection.mr = &subpage->iomem;
1161 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1162 phys_section_add(&d->map, &subsection));
1163 } else {
1164 subpage = container_of(existing->mr, subpage_t, iomem);
1165 }
1166 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1167 end = start + int128_get64(section->size) - 1;
1168 subpage_register(subpage, start, end,
1169 phys_section_add(&d->map, section));
1170 }
1171
1172
1173 static void register_multipage(AddressSpaceDispatch *d,
1174 MemoryRegionSection *section)
1175 {
1176 hwaddr start_addr = section->offset_within_address_space;
1177 uint16_t section_index = phys_section_add(&d->map, section);
1178 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1179 TARGET_PAGE_BITS));
1180
1181 assert(num_pages);
1182 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1183 }
1184
1185 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1186 {
1187 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1188 AddressSpaceDispatch *d = as->next_dispatch;
1189 MemoryRegionSection now = *section, remain = *section;
1190 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1191
1192 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1193 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1194 - now.offset_within_address_space;
1195
1196 now.size = int128_min(int128_make64(left), now.size);
1197 register_subpage(d, &now);
1198 } else {
1199 now.size = int128_zero();
1200 }
1201 while (int128_ne(remain.size, now.size)) {
1202 remain.size = int128_sub(remain.size, now.size);
1203 remain.offset_within_address_space += int128_get64(now.size);
1204 remain.offset_within_region += int128_get64(now.size);
1205 now = remain;
1206 if (int128_lt(remain.size, page_size)) {
1207 register_subpage(d, &now);
1208 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1209 now.size = page_size;
1210 register_subpage(d, &now);
1211 } else {
1212 now.size = int128_and(now.size, int128_neg(page_size));
1213 register_multipage(d, &now);
1214 }
1215 }
1216 }
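/*
 * Illustrative walk-through of mem_add() (added comment, assuming 4 KiB
 * pages): a section at offset 0x800 with size 0x2000 is split into a subpage
 * for [0x800, 0xfff], one full page [0x1000, 0x1fff] registered via
 * register_multipage(), and a trailing subpage for [0x2000, 0x27ff]. Only
 * page-aligned, page-sized middle portions take the multipage path.
 */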
1217
1218 void qemu_flush_coalesced_mmio_buffer(void)
1219 {
1220 if (kvm_enabled())
1221 kvm_flush_coalesced_mmio_buffer();
1222 }
1223
1224 void qemu_mutex_lock_ramlist(void)
1225 {
1226 qemu_mutex_lock(&ram_list.mutex);
1227 }
1228
1229 void qemu_mutex_unlock_ramlist(void)
1230 {
1231 qemu_mutex_unlock(&ram_list.mutex);
1232 }
1233
1234 #ifdef __linux__
1235 static void *file_ram_alloc(RAMBlock *block,
1236 ram_addr_t memory,
1237 const char *path,
1238 Error **errp)
1239 {
1240 bool unlink_on_error = false;
1241 char *filename;
1242 char *sanitized_name;
1243 char *c;
1244 void *area;
1245 int fd = -1;
1246 int64_t page_size;
1247
1248 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1249 error_setg(errp,
1250 "host lacks kvm mmu notifiers, -mem-path unsupported");
1251 return NULL;
1252 }
1253
1254 for (;;) {
1255 fd = open(path, O_RDWR);
1256 if (fd >= 0) {
1257 /* @path names an existing file, use it */
1258 break;
1259 }
1260 if (errno == ENOENT) {
1261 /* @path names a file that doesn't exist, create it */
1262 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1263 if (fd >= 0) {
1264 unlink_on_error = true;
1265 break;
1266 }
1267 } else if (errno == EISDIR) {
1268 /* @path names a directory, create a file there */
1269 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1270 sanitized_name = g_strdup(memory_region_name(block->mr));
1271 for (c = sanitized_name; *c != '\0'; c++) {
1272 if (*c == '/') {
1273 *c = '_';
1274 }
1275 }
1276
1277 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1278 sanitized_name);
1279 g_free(sanitized_name);
1280
1281 fd = mkstemp(filename);
1282 if (fd >= 0) {
1283 unlink(filename);
1284 g_free(filename);
1285 break;
1286 }
1287 g_free(filename);
1288 }
1289 if (errno != EEXIST && errno != EINTR) {
1290 error_setg_errno(errp, errno,
1291 "can't open backing store %s for guest RAM",
1292 path);
1293 goto error;
1294 }
1295 /*
1296 * Try again on EINTR and EEXIST. The latter happens when
1297 * something else creates the file between our two open().
1298 */
1299 }
1300
1301 page_size = qemu_fd_getpagesize(fd);
1302 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
1303
1304 if (memory < page_size) {
1305 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1306 "or larger than page size 0x%" PRIx64,
1307 memory, page_size);
1308 goto error;
1309 }
1310
1311 memory = ROUND_UP(memory, page_size);
1312
1313 /*
1314 * ftruncate is not supported by hugetlbfs in older
1315 * hosts, so don't bother bailing out on errors.
1316 * If anything goes wrong with it under other filesystems,
1317 * mmap will fail.
1318 */
1319 if (ftruncate(fd, memory)) {
1320 perror("ftruncate");
1321 }
1322
1323 area = qemu_ram_mmap(fd, memory, block->mr->align,
1324 block->flags & RAM_SHARED);
1325 if (area == MAP_FAILED) {
1326 error_setg_errno(errp, errno,
1327 "unable to map backing store for guest RAM");
1328 goto error;
1329 }
1330
1331 if (mem_prealloc) {
1332 os_mem_prealloc(fd, area, memory);
1333 }
1334
1335 block->fd = fd;
1336 return area;
1337
1338 error:
1339 if (unlink_on_error) {
1340 unlink(path);
1341 }
1342 if (fd != -1) {
1343 close(fd);
1344 }
1345 return NULL;
1346 }
1347 #endif
1348
1349 /* Called with the ramlist lock held. */
1350 static ram_addr_t find_ram_offset(ram_addr_t size)
1351 {
1352 RAMBlock *block, *next_block;
1353 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1354
1355 assert(size != 0); /* it would hand out same offset multiple times */
1356
1357 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1358 return 0;
1359 }
1360
1361 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1362 ram_addr_t end, next = RAM_ADDR_MAX;
1363
1364 end = block->offset + block->max_length;
1365
1366 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1367 if (next_block->offset >= end) {
1368 next = MIN(next, next_block->offset);
1369 }
1370 }
1371 if (next - end >= size && next - end < mingap) {
1372 offset = end;
1373 mingap = next - end;
1374 }
1375 }
1376
1377 if (offset == RAM_ADDR_MAX) {
1378 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1379 (uint64_t)size);
1380 abort();
1381 }
1382
1383 return offset;
1384 }
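/*
 * Example of the gap search above (added sketch): with existing blocks at
 * [0, 0x40000000) and [0x80000000, 0xc0000000), a request for 0x10000000
 * bytes returns offset 0x40000000: the 1 GiB hole between the blocks is the
 * smallest gap that still fits the request, which keeps the ram_addr_t space
 * densely packed.
 */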
1385
1386 ram_addr_t last_ram_offset(void)
1387 {
1388 RAMBlock *block;
1389 ram_addr_t last = 0;
1390
1391 rcu_read_lock();
1392 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1393 last = MAX(last, block->offset + block->max_length);
1394 }
1395 rcu_read_unlock();
1396 return last;
1397 }
1398
1399 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1400 {
1401 int ret;
1402
1403 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1404 if (!machine_dump_guest_core(current_machine)) {
1405 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1406 if (ret) {
1407 perror("qemu_madvise");
1408 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1409 "but dump_guest_core=off specified\n");
1410 }
1411 }
1412 }
1413
1414 const char *qemu_ram_get_idstr(RAMBlock *rb)
1415 {
1416 return rb->idstr;
1417 }
1418
1419 /* Called with iothread lock held. */
1420 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1421 {
1422 RAMBlock *block;
1423
1424 rcu_read_lock();
1425
1426 assert(new_block);
1427 assert(!new_block->idstr[0]);
1428
1429 if (dev) {
1430 char *id = qdev_get_dev_path(dev);
1431 if (id) {
1432 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1433 g_free(id);
1434 }
1435 }
1436 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1437
1438 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1439 if (block != new_block &&
1440 !strcmp(block->idstr, new_block->idstr)) {
1441 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1442 new_block->idstr);
1443 abort();
1444 }
1445 }
1446 rcu_read_unlock();
1447 }
1448
1449 /* Called with iothread lock held. */
1450 void qemu_ram_unset_idstr(RAMBlock *block)
1451 {
1452 /* FIXME: arch_init.c assumes that this is not called throughout
1453 * migration. Ignore the problem since hot-unplug during migration
1454 * does not work anyway.
1455 */
1456
1457 rcu_read_lock();
1458 if (block) {
1459 memset(block->idstr, 0, sizeof(block->idstr));
1460 }
1461 rcu_read_unlock();
1462 }
1463
1464 static int memory_try_enable_merging(void *addr, size_t len)
1465 {
1466 if (!machine_mem_merge(current_machine)) {
1467 /* disabled by the user */
1468 return 0;
1469 }
1470
1471 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1472 }
1473
1474 /* Only legal before guest might have detected the memory size: e.g. on
1475 * incoming migration, or right after reset.
1476 *
1477 * As the memory core doesn't know how memory is accessed, it is up to the
1478 * resize callback to update device state and/or add assertions to detect
1479 * misuse, if necessary.
1480 */
1481 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1482 {
1483 assert(block);
1484
1485 newsize = HOST_PAGE_ALIGN(newsize);
1486
1487 if (block->used_length == newsize) {
1488 return 0;
1489 }
1490
1491 if (!(block->flags & RAM_RESIZEABLE)) {
1492 error_setg_errno(errp, EINVAL,
1493 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1494 " in != 0x" RAM_ADDR_FMT, block->idstr,
1495 newsize, block->used_length);
1496 return -EINVAL;
1497 }
1498
1499 if (block->max_length < newsize) {
1500 error_setg_errno(errp, EINVAL,
1501 "Length too large: %s: 0x" RAM_ADDR_FMT
1502 " > 0x" RAM_ADDR_FMT, block->idstr,
1503 newsize, block->max_length);
1504 return -EINVAL;
1505 }
1506
1507 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1508 block->used_length = newsize;
1509 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1510 DIRTY_CLIENTS_ALL);
1511 memory_region_set_size(block->mr, newsize);
1512 if (block->resized) {
1513 block->resized(block->idstr, newsize, block->host);
1514 }
1515 return 0;
1516 }
1517
1518 /* Called with ram_list.mutex held */
1519 static void dirty_memory_extend(ram_addr_t old_ram_size,
1520 ram_addr_t new_ram_size)
1521 {
1522 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1523 DIRTY_MEMORY_BLOCK_SIZE);
1524 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1525 DIRTY_MEMORY_BLOCK_SIZE);
1526 int i;
1527
1528 /* Only need to extend if block count increased */
1529 if (new_num_blocks <= old_num_blocks) {
1530 return;
1531 }
1532
1533 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1534 DirtyMemoryBlocks *old_blocks;
1535 DirtyMemoryBlocks *new_blocks;
1536 int j;
1537
1538 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1539 new_blocks = g_malloc(sizeof(*new_blocks) +
1540 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1541
1542 if (old_num_blocks) {
1543 memcpy(new_blocks->blocks, old_blocks->blocks,
1544 old_num_blocks * sizeof(old_blocks->blocks[0]));
1545 }
1546
1547 for (j = old_num_blocks; j < new_num_blocks; j++) {
1548 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1549 }
1550
1551 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1552
1553 if (old_blocks) {
1554 g_free_rcu(old_blocks, rcu);
1555 }
1556 }
1557 }
1558
1559 static void ram_block_add(RAMBlock *new_block, Error **errp)
1560 {
1561 RAMBlock *block;
1562 RAMBlock *last_block = NULL;
1563 ram_addr_t old_ram_size, new_ram_size;
1564 Error *err = NULL;
1565
1566 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1567
1568 qemu_mutex_lock_ramlist();
1569 new_block->offset = find_ram_offset(new_block->max_length);
1570
1571 if (!new_block->host) {
1572 if (xen_enabled()) {
1573 xen_ram_alloc(new_block->offset, new_block->max_length,
1574 new_block->mr, &err);
1575 if (err) {
1576 error_propagate(errp, err);
1577 qemu_mutex_unlock_ramlist();
1578 return;
1579 }
1580 } else {
1581 new_block->host = phys_mem_alloc(new_block->max_length,
1582 &new_block->mr->align);
1583 if (!new_block->host) {
1584 error_setg_errno(errp, errno,
1585 "cannot set up guest memory '%s'",
1586 memory_region_name(new_block->mr));
1587 qemu_mutex_unlock_ramlist();
1588 return;
1589 }
1590 memory_try_enable_merging(new_block->host, new_block->max_length);
1591 }
1592 }
1593
1594 new_ram_size = MAX(old_ram_size,
1595 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1596 if (new_ram_size > old_ram_size) {
1597 migration_bitmap_extend(old_ram_size, new_ram_size);
1598 dirty_memory_extend(old_ram_size, new_ram_size);
1599 }
1600 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1601 * QLIST (which has an RCU-friendly variant) does not have insertion at
1602 * tail, so save the last element in last_block.
1603 */
1604 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1605 last_block = block;
1606 if (block->max_length < new_block->max_length) {
1607 break;
1608 }
1609 }
1610 if (block) {
1611 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1612 } else if (last_block) {
1613 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1614 } else { /* list is empty */
1615 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1616 }
1617 ram_list.mru_block = NULL;
1618
1619 /* Write list before version */
1620 smp_wmb();
1621 ram_list.version++;
1622 qemu_mutex_unlock_ramlist();
1623
1624 cpu_physical_memory_set_dirty_range(new_block->offset,
1625 new_block->used_length,
1626 DIRTY_CLIENTS_ALL);
1627
1628 if (new_block->host) {
1629 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1630 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1631 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1632 if (kvm_enabled()) {
1633 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1634 }
1635 }
1636 }
1637
1638 #ifdef __linux__
1639 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1640 bool share, const char *mem_path,
1641 Error **errp)
1642 {
1643 RAMBlock *new_block;
1644 Error *local_err = NULL;
1645
1646 if (xen_enabled()) {
1647 error_setg(errp, "-mem-path not supported with Xen");
1648 return NULL;
1649 }
1650
1651 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1652 /*
1653 * file_ram_alloc() needs to allocate just like
1654 * phys_mem_alloc, but we haven't bothered to provide
1655 * a hook there.
1656 */
1657 error_setg(errp,
1658 "-mem-path not supported with this accelerator");
1659 return NULL;
1660 }
1661
1662 size = HOST_PAGE_ALIGN(size);
1663 new_block = g_malloc0(sizeof(*new_block));
1664 new_block->mr = mr;
1665 new_block->used_length = size;
1666 new_block->max_length = size;
1667 new_block->flags = share ? RAM_SHARED : 0;
1668 new_block->host = file_ram_alloc(new_block, size,
1669 mem_path, errp);
1670 if (!new_block->host) {
1671 g_free(new_block);
1672 return NULL;
1673 }
1674
1675 ram_block_add(new_block, &local_err);
1676 if (local_err) {
1677 g_free(new_block);
1678 error_propagate(errp, local_err);
1679 return NULL;
1680 }
1681 return new_block;
1682 }
1683 #endif
1684
1685 static
1686 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1687 void (*resized)(const char*,
1688 uint64_t length,
1689 void *host),
1690 void *host, bool resizeable,
1691 MemoryRegion *mr, Error **errp)
1692 {
1693 RAMBlock *new_block;
1694 Error *local_err = NULL;
1695
1696 size = HOST_PAGE_ALIGN(size);
1697 max_size = HOST_PAGE_ALIGN(max_size);
1698 new_block = g_malloc0(sizeof(*new_block));
1699 new_block->mr = mr;
1700 new_block->resized = resized;
1701 new_block->used_length = size;
1702 new_block->max_length = max_size;
1703 assert(max_size >= size);
1704 new_block->fd = -1;
1705 new_block->host = host;
1706 if (host) {
1707 new_block->flags |= RAM_PREALLOC;
1708 }
1709 if (resizeable) {
1710 new_block->flags |= RAM_RESIZEABLE;
1711 }
1712 ram_block_add(new_block, &local_err);
1713 if (local_err) {
1714 g_free(new_block);
1715 error_propagate(errp, local_err);
1716 return NULL;
1717 }
1718 return new_block;
1719 }
1720
1721 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1722 MemoryRegion *mr, Error **errp)
1723 {
1724 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1725 }
1726
1727 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1728 {
1729 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1730 }
1731
1732 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1733 void (*resized)(const char*,
1734 uint64_t length,
1735 void *host),
1736 MemoryRegion *mr, Error **errp)
1737 {
1738 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1739 }
1740
1741 static void reclaim_ramblock(RAMBlock *block)
1742 {
1743 if (block->flags & RAM_PREALLOC) {
1744 ;
1745 } else if (xen_enabled()) {
1746 xen_invalidate_map_cache_entry(block->host);
1747 #ifndef _WIN32
1748 } else if (block->fd >= 0) {
1749 qemu_ram_munmap(block->host, block->max_length);
1750 close(block->fd);
1751 #endif
1752 } else {
1753 qemu_anon_ram_free(block->host, block->max_length);
1754 }
1755 g_free(block);
1756 }
1757
1758 void qemu_ram_free(RAMBlock *block)
1759 {
1760 if (!block) {
1761 return;
1762 }
1763
1764 qemu_mutex_lock_ramlist();
1765 QLIST_REMOVE_RCU(block, next);
1766 ram_list.mru_block = NULL;
1767 /* Write list before version */
1768 smp_wmb();
1769 ram_list.version++;
1770 call_rcu(block, reclaim_ramblock, rcu);
1771 qemu_mutex_unlock_ramlist();
1772 }
1773
1774 #ifndef _WIN32
1775 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1776 {
1777 RAMBlock *block;
1778 ram_addr_t offset;
1779 int flags;
1780 void *area, *vaddr;
1781
1782 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1783 offset = addr - block->offset;
1784 if (offset < block->max_length) {
1785 vaddr = ramblock_ptr(block, offset);
1786 if (block->flags & RAM_PREALLOC) {
1787 ;
1788 } else if (xen_enabled()) {
1789 abort();
1790 } else {
1791 flags = MAP_FIXED;
1792 if (block->fd >= 0) {
1793 flags |= (block->flags & RAM_SHARED ?
1794 MAP_SHARED : MAP_PRIVATE);
1795 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1796 flags, block->fd, offset);
1797 } else {
1798 /*
1799 * Remap needs to match alloc. Accelerators that
1800 * set phys_mem_alloc never remap. If they did,
1801 * we'd need a remap hook here.
1802 */
1803 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1804
1805 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1806 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1807 flags, -1, 0);
1808 }
1809 if (area != vaddr) {
1810 fprintf(stderr, "Could not remap addr: "
1811 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1812 length, addr);
1813 exit(1);
1814 }
1815 memory_try_enable_merging(vaddr, length);
1816 qemu_ram_setup_dump(vaddr, length);
1817 }
1818 }
1819 }
1820 }
1821 #endif /* !_WIN32 */
1822
1823 int qemu_get_ram_fd(ram_addr_t addr)
1824 {
1825 RAMBlock *block;
1826 int fd;
1827
1828 rcu_read_lock();
1829 block = qemu_get_ram_block(addr);
1830 fd = block->fd;
1831 rcu_read_unlock();
1832 return fd;
1833 }
1834
1835 void qemu_set_ram_fd(ram_addr_t addr, int fd)
1836 {
1837 RAMBlock *block;
1838
1839 rcu_read_lock();
1840 block = qemu_get_ram_block(addr);
1841 block->fd = fd;
1842 rcu_read_unlock();
1843 }
1844
1845 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1846 {
1847 RAMBlock *block;
1848 void *ptr;
1849
1850 rcu_read_lock();
1851 block = qemu_get_ram_block(addr);
1852 ptr = ramblock_ptr(block, 0);
1853 rcu_read_unlock();
1854 return ptr;
1855 }
1856
1857 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1858 * This should not be used for general purpose DMA. Use address_space_map
1859 * or address_space_rw instead. For local memory (e.g. video ram) that the
1860 * device owns, use memory_region_get_ram_ptr.
1861 *
1862 * Called within RCU critical section.
1863 */
1864 void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1865 {
1866 RAMBlock *block = ram_block;
1867
1868 if (block == NULL) {
1869 block = qemu_get_ram_block(addr);
1870 }
1871
1872 if (xen_enabled() && block->host == NULL) {
1873 /* We need to check if the requested address is in the RAM
1874 * because we don't want to map the entire memory in QEMU.
1875 * In that case just map until the end of the page.
1876 */
1877 if (block->offset == 0) {
1878 return xen_map_cache(addr, 0, 0);
1879 }
1880
1881 block->host = xen_map_cache(block->offset, block->max_length, 1);
1882 }
1883 return ramblock_ptr(block, addr - block->offset);
1884 }
1885
1886 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1887 * but takes a size argument.
1888 *
1889 * Called within RCU critical section.
1890 */
1891 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1892 hwaddr *size)
1893 {
1894 RAMBlock *block = ram_block;
1895 ram_addr_t offset_inside_block;
1896 if (*size == 0) {
1897 return NULL;
1898 }
1899
1900 if (block == NULL) {
1901 block = qemu_get_ram_block(addr);
1902 }
1903 offset_inside_block = addr - block->offset;
1904 *size = MIN(*size, block->max_length - offset_inside_block);
1905
1906 if (xen_enabled() && block->host == NULL) {
1907 /* We need to check if the requested address is in the RAM
1908 * because we don't want to map the entire memory in QEMU.
1909 * In that case just map the requested area.
1910 */
1911 if (block->offset == 0) {
1912 return xen_map_cache(addr, *size, 1);
1913 }
1914
1915 block->host = xen_map_cache(block->offset, block->max_length, 1);
1916 }
1917
1918 return ramblock_ptr(block, offset_inside_block);
1919 }
1920
1921 /*
1922 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1923 * in that RAMBlock.
1924 *
1925 * ptr: Host pointer to look up
1926 * round_offset: If true round the result offset down to a page boundary
1927 * *ram_addr: set to result ram_addr
1928 * *offset: set to result offset within the RAMBlock
1929 *
1930 * Returns: RAMBlock (or NULL if not found)
1931 *
1932 * By the time this function returns, the returned pointer is not protected
1933 * by RCU anymore. If the caller is not within an RCU critical section and
1934 * does not hold the iothread lock, it must have other means of protecting the
1935 * pointer, such as a reference to the region that includes the incoming
1936 * ram_addr_t.
1937 */
1938 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1939 ram_addr_t *ram_addr,
1940 ram_addr_t *offset)
1941 {
1942 RAMBlock *block;
1943 uint8_t *host = ptr;
1944
1945 if (xen_enabled()) {
1946 rcu_read_lock();
1947 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1948 block = qemu_get_ram_block(*ram_addr);
1949 if (block) {
1950 *offset = (host - block->host);
1951 }
1952 rcu_read_unlock();
1953 return block;
1954 }
1955
1956 rcu_read_lock();
1957 block = atomic_rcu_read(&ram_list.mru_block);
1958 if (block && block->host && host - block->host < block->max_length) {
1959 goto found;
1960 }
1961
1962 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1963 /* This can happen when the block is not mapped. */
1964 if (block->host == NULL) {
1965 continue;
1966 }
1967 if (host - block->host < block->max_length) {
1968 goto found;
1969 }
1970 }
1971
1972 rcu_read_unlock();
1973 return NULL;
1974
1975 found:
1976 *offset = (host - block->host);
1977 if (round_offset) {
1978 *offset &= TARGET_PAGE_MASK;
1979 }
1980 *ram_addr = block->offset + *offset;
1981 rcu_read_unlock();
1982 return block;
1983 }
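/*
 * Added usage sketch (hypothetical caller, not part of the original file):
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         // offset is rounded down to a page boundary here, and
 *         // ram_addr == rb->offset + offset
 *     }
 *
 * As noted above, once this function returns the caller must keep the block
 * alive by other means (RCU, the iothread lock, or a memory region reference).
 */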
1984
1985 /*
1986 * Finds the named RAMBlock
1987 *
1988 * name: The name of RAMBlock to find
1989 *
1990 * Returns: RAMBlock (or NULL if not found)
1991 */
1992 RAMBlock *qemu_ram_block_by_name(const char *name)
1993 {
1994 RAMBlock *block;
1995
1996 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1997 if (!strcmp(name, block->idstr)) {
1998 return block;
1999 }
2000 }
2001
2002 return NULL;
2003 }
2004
2005 /* Some of the softmmu routines need to translate from a host pointer
2006 (typically a TLB entry) back to a ram offset. */
2007 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2008 {
2009 RAMBlock *block;
2010 ram_addr_t offset; /* Not used */
2011
2012 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2013
2014 if (!block) {
2015 return NULL;
2016 }
2017
2018 return block->mr;
2019 }
2020
2021 /* Called within RCU critical section. */
2022 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2023 uint64_t val, unsigned size)
2024 {
2025 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
2026 tb_invalidate_phys_page_fast(ram_addr, size);
2027 }
2028 switch (size) {
2029 case 1:
2030 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2031 break;
2032 case 2:
2033 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2034 break;
2035 case 4:
2036 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2037 break;
2038 default:
2039 abort();
2040 }
2041 /* Set both VGA and migration bits for simplicity and to remove
2042 * the notdirty callback faster.
2043 */
2044 cpu_physical_memory_set_dirty_range(ram_addr, size,
2045 DIRTY_CLIENTS_NOCODE);
2046 /* we remove the notdirty callback only if the code has been
2047 flushed */
2048 if (!cpu_physical_memory_is_clean(ram_addr)) {
2049 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2050 }
2051 }
2052
2053 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2054 unsigned size, bool is_write)
2055 {
2056 return is_write;
2057 }
2058
2059 static const MemoryRegionOps notdirty_mem_ops = {
2060 .write = notdirty_mem_write,
2061 .valid.accepts = notdirty_mem_accepts,
2062 .endianness = DEVICE_NATIVE_ENDIAN,
2063 };
2064
2065 /* Generate a debug exception if a watchpoint has been hit. */
2066 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2067 {
2068 CPUState *cpu = current_cpu;
2069 CPUClass *cc = CPU_GET_CLASS(cpu);
2070 CPUArchState *env = cpu->env_ptr;
2071 target_ulong pc, cs_base;
2072 target_ulong vaddr;
2073 CPUWatchpoint *wp;
2074 uint32_t cpu_flags;
2075
2076 if (cpu->watchpoint_hit) {
2077 /* We re-entered the check after replacing the TB. Now raise
2078 * the debug interrupt so that it will trigger after the
2079 * current instruction. */
2080 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2081 return;
2082 }
2083 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2084 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2085 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2086 && (wp->flags & flags)) {
2087 if (flags == BP_MEM_READ) {
2088 wp->flags |= BP_WATCHPOINT_HIT_READ;
2089 } else {
2090 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2091 }
2092 wp->hitaddr = vaddr;
2093 wp->hitattrs = attrs;
2094 if (!cpu->watchpoint_hit) {
2095 if (wp->flags & BP_CPU &&
2096 !cc->debug_check_watchpoint(cpu, wp)) {
2097 wp->flags &= ~BP_WATCHPOINT_HIT;
2098 continue;
2099 }
2100 cpu->watchpoint_hit = wp;
2101 tb_check_watchpoint(cpu);
2102 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2103 cpu->exception_index = EXCP_DEBUG;
2104 cpu_loop_exit(cpu);
2105 } else {
2106 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2107 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2108 cpu_resume_from_signal(cpu, NULL);
2109 }
2110 }
2111 } else {
2112 wp->flags &= ~BP_WATCHPOINT_HIT;
2113 }
2114 }
2115 }
2116
2117 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2118 so these check for a hit then pass through to the normal out-of-line
2119 phys routines. */
2120 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2121 unsigned size, MemTxAttrs attrs)
2122 {
2123 MemTxResult res;
2124 uint64_t data;
2125 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2126 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2127
2128 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2129 switch (size) {
2130 case 1:
2131 data = address_space_ldub(as, addr, attrs, &res);
2132 break;
2133 case 2:
2134 data = address_space_lduw(as, addr, attrs, &res);
2135 break;
2136 case 4:
2137 data = address_space_ldl(as, addr, attrs, &res);
2138 break;
2139 default: abort();
2140 }
2141 *pdata = data;
2142 return res;
2143 }
2144
2145 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2146 uint64_t val, unsigned size,
2147 MemTxAttrs attrs)
2148 {
2149 MemTxResult res;
2150 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2151 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2152
2153 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2154 switch (size) {
2155 case 1:
2156 address_space_stb(as, addr, val, attrs, &res);
2157 break;
2158 case 2:
2159 address_space_stw(as, addr, val, attrs, &res);
2160 break;
2161 case 4:
2162 address_space_stl(as, addr, val, attrs, &res);
2163 break;
2164 default: abort();
2165 }
2166 return res;
2167 }
2168
2169 static const MemoryRegionOps watch_mem_ops = {
2170 .read_with_attrs = watch_mem_read,
2171 .write_with_attrs = watch_mem_write,
2172 .endianness = DEVICE_NATIVE_ENDIAN,
2173 };
2174
2175 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2176 unsigned len, MemTxAttrs attrs)
2177 {
2178 subpage_t *subpage = opaque;
2179 uint8_t buf[8];
2180 MemTxResult res;
2181
2182 #if defined(DEBUG_SUBPAGE)
2183 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2184 subpage, len, addr);
2185 #endif
2186 res = address_space_read(subpage->as, addr + subpage->base,
2187 attrs, buf, len);
2188 if (res) {
2189 return res;
2190 }
2191 switch (len) {
2192 case 1:
2193 *data = ldub_p(buf);
2194 return MEMTX_OK;
2195 case 2:
2196 *data = lduw_p(buf);
2197 return MEMTX_OK;
2198 case 4:
2199 *data = ldl_p(buf);
2200 return MEMTX_OK;
2201 case 8:
2202 *data = ldq_p(buf);
2203 return MEMTX_OK;
2204 default:
2205 abort();
2206 }
2207 }
2208
2209 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2210 uint64_t value, unsigned len, MemTxAttrs attrs)
2211 {
2212 subpage_t *subpage = opaque;
2213 uint8_t buf[8];
2214
2215 #if defined(DEBUG_SUBPAGE)
2216 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2217 " value %"PRIx64"\n",
2218 __func__, subpage, len, addr, value);
2219 #endif
2220 switch (len) {
2221 case 1:
2222 stb_p(buf, value);
2223 break;
2224 case 2:
2225 stw_p(buf, value);
2226 break;
2227 case 4:
2228 stl_p(buf, value);
2229 break;
2230 case 8:
2231 stq_p(buf, value);
2232 break;
2233 default:
2234 abort();
2235 }
2236 return address_space_write(subpage->as, addr + subpage->base,
2237 attrs, buf, len);
2238 }
2239
2240 static bool subpage_accepts(void *opaque, hwaddr addr,
2241 unsigned len, bool is_write)
2242 {
2243 subpage_t *subpage = opaque;
2244 #if defined(DEBUG_SUBPAGE)
2245 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2246 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2247 #endif
2248
2249 return address_space_access_valid(subpage->as, addr + subpage->base,
2250 len, is_write);
2251 }
2252
2253 static const MemoryRegionOps subpage_ops = {
2254 .read_with_attrs = subpage_read,
2255 .write_with_attrs = subpage_write,
2256 .impl.min_access_size = 1,
2257 .impl.max_access_size = 8,
2258 .valid.min_access_size = 1,
2259 .valid.max_access_size = 8,
2260 .valid.accepts = subpage_accepts,
2261 .endianness = DEVICE_NATIVE_ENDIAN,
2262 };
2263
2264 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2265 uint16_t section)
2266 {
2267 int idx, eidx;
2268
2269 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2270 return -1;
2271 idx = SUBPAGE_IDX(start);
2272 eidx = SUBPAGE_IDX(end);
2273 #if defined(DEBUG_SUBPAGE)
2274 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2275 __func__, mmio, start, end, idx, eidx, section);
2276 #endif
2277 for (; idx <= eidx; idx++) {
2278 mmio->sub_section[idx] = section;
2279 }
2280
2281 return 0;
2282 }
2283
2284 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2285 {
2286 subpage_t *mmio;
2287
2288 mmio = g_malloc0(sizeof(subpage_t));
2289
2290 mmio->as = as;
2291 mmio->base = base;
2292 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2293 NULL, TARGET_PAGE_SIZE);
2294 mmio->iomem.subpage = true;
2295 #if defined(DEBUG_SUBPAGE)
2296 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2297 mmio, base, TARGET_PAGE_SIZE);
2298 #endif
2299 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2300
2301 return mmio;
2302 }
2303
2304 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2305 MemoryRegion *mr)
2306 {
2307 assert(as);
2308 MemoryRegionSection section = {
2309 .address_space = as,
2310 .mr = mr,
2311 .offset_within_address_space = 0,
2312 .offset_within_region = 0,
2313 .size = int128_2_64(),
2314 };
2315
2316 return phys_section_add(map, &section);
2317 }
2318
2319 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2320 {
2321 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2322 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2323 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2324 MemoryRegionSection *sections = d->map.sections;
2325
2326 return sections[index & ~TARGET_PAGE_MASK].mr;
2327 }
2328
2329 static void io_mem_init(void)
2330 {
2331 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2332 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2333 NULL, UINT64_MAX);
2334 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2335 NULL, UINT64_MAX);
2336 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2337 NULL, UINT64_MAX);
2338 }
2339
2340 static void mem_begin(MemoryListener *listener)
2341 {
2342 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2343 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2344 uint16_t n;
2345
2346 n = dummy_section(&d->map, as, &io_mem_unassigned);
2347 assert(n == PHYS_SECTION_UNASSIGNED);
2348 n = dummy_section(&d->map, as, &io_mem_notdirty);
2349 assert(n == PHYS_SECTION_NOTDIRTY);
2350 n = dummy_section(&d->map, as, &io_mem_rom);
2351 assert(n == PHYS_SECTION_ROM);
2352 n = dummy_section(&d->map, as, &io_mem_watch);
2353 assert(n == PHYS_SECTION_WATCH);
2354
2355 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2356 d->as = as;
2357 as->next_dispatch = d;
2358 }
2359
2360 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2361 {
2362 phys_sections_free(&d->map);
2363 g_free(d);
2364 }
2365
2366 static void mem_commit(MemoryListener *listener)
2367 {
2368 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2369 AddressSpaceDispatch *cur = as->dispatch;
2370 AddressSpaceDispatch *next = as->next_dispatch;
2371
2372 phys_page_compact_all(next, next->map.nodes_nb);
2373
2374 atomic_rcu_set(&as->dispatch, next);
2375 if (cur) {
2376 call_rcu(cur, address_space_dispatch_free, rcu);
2377 }
2378 }
2379
2380 static void tcg_commit(MemoryListener *listener)
2381 {
2382 CPUAddressSpace *cpuas;
2383 AddressSpaceDispatch *d;
2384
2385 /* since each CPU stores ram addresses in its TLB cache, we must
2386 reset the modified entries */
2387 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2388 cpu_reloading_memory_map();
2389 /* The CPU and TLB are protected by the iothread lock.
2390 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2391 * may have split the RCU critical section.
2392 */
2393 d = atomic_rcu_read(&cpuas->as->dispatch);
2394 cpuas->memory_dispatch = d;
2395 tlb_flush(cpuas->cpu, 1);
2396 }
2397
2398 void address_space_init_dispatch(AddressSpace *as)
2399 {
2400 as->dispatch = NULL;
2401 as->dispatch_listener = (MemoryListener) {
2402 .begin = mem_begin,
2403 .commit = mem_commit,
2404 .region_add = mem_add,
2405 .region_nop = mem_add,
2406 .priority = 0,
2407 };
2408 memory_listener_register(&as->dispatch_listener, as);
2409 }
2410
2411 void address_space_unregister(AddressSpace *as)
2412 {
2413 memory_listener_unregister(&as->dispatch_listener);
2414 }
2415
2416 void address_space_destroy_dispatch(AddressSpace *as)
2417 {
2418 AddressSpaceDispatch *d = as->dispatch;
2419
2420 atomic_rcu_set(&as->dispatch, NULL);
2421 if (d) {
2422 call_rcu(d, address_space_dispatch_free, rcu);
2423 }
2424 }
2425
2426 static void memory_map_init(void)
2427 {
2428 system_memory = g_malloc(sizeof(*system_memory));
2429
2430 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2431 address_space_init(&address_space_memory, system_memory, "memory");
2432
2433 system_io = g_malloc(sizeof(*system_io));
2434 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2435 65536);
2436 address_space_init(&address_space_io, system_io, "I/O");
2437 }
2438
2439 MemoryRegion *get_system_memory(void)
2440 {
2441 return system_memory;
2442 }
2443
2444 MemoryRegion *get_system_io(void)
2445 {
2446 return system_io;
2447 }
2448
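/* Illustrative sketch (not built): board code typically plugs RAM and device
 * regions into the region returned by get_system_memory(). A minimal,
 * hypothetical example (vmstate registration and sizing policy omitted):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */
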
2449 #endif /* !defined(CONFIG_USER_ONLY) */
2450
2451 /* physical memory access (slow version, mainly for debug) */
2452 #if defined(CONFIG_USER_ONLY)
2453 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2454 uint8_t *buf, int len, int is_write)
2455 {
2456 int l, flags;
2457 target_ulong page;
2458 void *p;
2459
2460 while (len > 0) {
2461 page = addr & TARGET_PAGE_MASK;
2462 l = (page + TARGET_PAGE_SIZE) - addr;
2463 if (l > len)
2464 l = len;
2465 flags = page_get_flags(page);
2466 if (!(flags & PAGE_VALID))
2467 return -1;
2468 if (is_write) {
2469 if (!(flags & PAGE_WRITE))
2470 return -1;
2471 /* XXX: this code should not depend on lock_user */
2472 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2473 return -1;
2474 memcpy(p, buf, l);
2475 unlock_user(p, addr, l);
2476 } else {
2477 if (!(flags & PAGE_READ))
2478 return -1;
2479 /* XXX: this code should not depend on lock_user */
2480 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2481 return -1;
2482 memcpy(buf, p, l);
2483 unlock_user(p, addr, 0);
2484 }
2485 len -= l;
2486 buf += l;
2487 addr += l;
2488 }
2489 return 0;
2490 }
2491
2492 #else
2493
2494 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2495 hwaddr length)
2496 {
2497 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2498 /* No early return if dirty_log_mask is or becomes 0, because
2499 * cpu_physical_memory_set_dirty_range will still call
2500 * xen_modified_memory.
2501 */
2502 if (dirty_log_mask) {
2503 dirty_log_mask =
2504 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2505 }
2506 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2507 tb_invalidate_phys_range(addr, addr + length);
2508 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2509 }
2510 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2511 }
2512
2513 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2514 {
2515 unsigned access_size_max = mr->ops->valid.max_access_size;
2516
2517 /* Regions are assumed to support 1-4 byte accesses unless
2518 otherwise specified. */
2519 if (access_size_max == 0) {
2520 access_size_max = 4;
2521 }
2522
2523 /* Bound the maximum access by the alignment of the address. */
2524 if (!mr->ops->impl.unaligned) {
2525 unsigned align_size_max = addr & -addr;
2526 if (align_size_max != 0 && align_size_max < access_size_max) {
2527 access_size_max = align_size_max;
2528 }
2529 }
2530
2531 /* Don't attempt accesses larger than the maximum. */
2532 if (l > access_size_max) {
2533 l = access_size_max;
2534 }
2535 l = pow2floor(l);
2536
2537 return l;
2538 }
2539
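/* Worked example for memory_access_size(): for a region that declares
 * .valid.max_access_size = 4 and does not set .impl.unaligned, a 7-byte
 * request at address 0x1002 is first capped to 2 bytes by the address
 * alignment (0x1002 & -0x1002 == 2), and pow2floor() leaves it at 2, so the
 * caller issues a 2-byte access and loops for the remainder.
 */
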
2540 static bool prepare_mmio_access(MemoryRegion *mr)
2541 {
2542 bool unlocked = !qemu_mutex_iothread_locked();
2543 bool release_lock = false;
2544
2545 if (unlocked && mr->global_locking) {
2546 qemu_mutex_lock_iothread();
2547 unlocked = false;
2548 release_lock = true;
2549 }
2550 if (mr->flush_coalesced_mmio) {
2551 if (unlocked) {
2552 qemu_mutex_lock_iothread();
2553 }
2554 qemu_flush_coalesced_mmio_buffer();
2555 if (unlocked) {
2556 qemu_mutex_unlock_iothread();
2557 }
2558 }
2559
2560 return release_lock;
2561 }
2562
2563 /* Called within RCU critical section. */
2564 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2565 MemTxAttrs attrs,
2566 const uint8_t *buf,
2567 int len, hwaddr addr1,
2568 hwaddr l, MemoryRegion *mr)
2569 {
2570 uint8_t *ptr;
2571 uint64_t val;
2572 MemTxResult result = MEMTX_OK;
2573 bool release_lock = false;
2574
2575 for (;;) {
2576 if (!memory_access_is_direct(mr, true)) {
2577 release_lock |= prepare_mmio_access(mr);
2578 l = memory_access_size(mr, l, addr1);
2579 /* XXX: could force current_cpu to NULL to avoid
2580 potential bugs */
2581 switch (l) {
2582 case 8:
2583 /* 64 bit write access */
2584 val = ldq_p(buf);
2585 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2586 attrs);
2587 break;
2588 case 4:
2589 /* 32 bit write access */
2590 val = ldl_p(buf);
2591 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2592 attrs);
2593 break;
2594 case 2:
2595 /* 16 bit write access */
2596 val = lduw_p(buf);
2597 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2598 attrs);
2599 break;
2600 case 1:
2601 /* 8 bit write access */
2602 val = ldub_p(buf);
2603 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2604 attrs);
2605 break;
2606 default:
2607 abort();
2608 }
2609 } else {
2610 addr1 += memory_region_get_ram_addr(mr);
2611 /* RAM case */
2612 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
2613 memcpy(ptr, buf, l);
2614 invalidate_and_set_dirty(mr, addr1, l);
2615 }
2616
2617 if (release_lock) {
2618 qemu_mutex_unlock_iothread();
2619 release_lock = false;
2620 }
2621
2622 len -= l;
2623 buf += l;
2624 addr += l;
2625
2626 if (!len) {
2627 break;
2628 }
2629
2630 l = len;
2631 mr = address_space_translate(as, addr, &addr1, &l, true);
2632 }
2633
2634 return result;
2635 }
2636
2637 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2638 const uint8_t *buf, int len)
2639 {
2640 hwaddr l;
2641 hwaddr addr1;
2642 MemoryRegion *mr;
2643 MemTxResult result = MEMTX_OK;
2644
2645 if (len > 0) {
2646 rcu_read_lock();
2647 l = len;
2648 mr = address_space_translate(as, addr, &addr1, &l, true);
2649 result = address_space_write_continue(as, addr, attrs, buf, len,
2650 addr1, l, mr);
2651 rcu_read_unlock();
2652 }
2653
2654 return result;
2655 }
2656
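/* Illustrative sketch (not built): a device model writing a small buffer into
 * guest memory through the global address space. MEMTXATTRS_UNSPECIFIED is
 * used when the caller has no particular transaction attributes to convey;
 * the address 0x1000 is only an example:
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     MemTxResult res;
 *
 *     res = address_space_write(&address_space_memory, 0x1000,
 *                               MEMTXATTRS_UNSPECIFIED, data, sizeof(data));
 *     if (res != MEMTX_OK) {
 *         // the write hit an unassigned or error-returning region
 *     }
 */
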
2657 /* Called within RCU critical section. */
2658 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2659 MemTxAttrs attrs, uint8_t *buf,
2660 int len, hwaddr addr1, hwaddr l,
2661 MemoryRegion *mr)
2662 {
2663 uint8_t *ptr;
2664 uint64_t val;
2665 MemTxResult result = MEMTX_OK;
2666 bool release_lock = false;
2667
2668 for (;;) {
2669 if (!memory_access_is_direct(mr, false)) {
2670 /* I/O case */
2671 release_lock |= prepare_mmio_access(mr);
2672 l = memory_access_size(mr, l, addr1);
2673 switch (l) {
2674 case 8:
2675 /* 64 bit read access */
2676 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2677 attrs);
2678 stq_p(buf, val);
2679 break;
2680 case 4:
2681 /* 32 bit read access */
2682 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2683 attrs);
2684 stl_p(buf, val);
2685 break;
2686 case 2:
2687 /* 16 bit read access */
2688 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2689 attrs);
2690 stw_p(buf, val);
2691 break;
2692 case 1:
2693 /* 8 bit read access */
2694 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2695 attrs);
2696 stb_p(buf, val);
2697 break;
2698 default:
2699 abort();
2700 }
2701 } else {
2702 /* RAM case */
2703 ptr = qemu_get_ram_ptr(mr->ram_block,
2704 memory_region_get_ram_addr(mr) + addr1);
2705 memcpy(buf, ptr, l);
2706 }
2707
2708 if (release_lock) {
2709 qemu_mutex_unlock_iothread();
2710 release_lock = false;
2711 }
2712
2713 len -= l;
2714 buf += l;
2715 addr += l;
2716
2717 if (!len) {
2718 break;
2719 }
2720
2721 l = len;
2722 mr = address_space_translate(as, addr, &addr1, &l, false);
2723 }
2724
2725 return result;
2726 }
2727
2728 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2729 MemTxAttrs attrs, uint8_t *buf, int len)
2730 {
2731 hwaddr l;
2732 hwaddr addr1;
2733 MemoryRegion *mr;
2734 MemTxResult result = MEMTX_OK;
2735
2736 if (len > 0) {
2737 rcu_read_lock();
2738 l = len;
2739 mr = address_space_translate(as, addr, &addr1, &l, false);
2740 result = address_space_read_continue(as, addr, attrs, buf, len,
2741 addr1, l, mr);
2742 rcu_read_unlock();
2743 }
2744
2745 return result;
2746 }
2747
2748 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2749 uint8_t *buf, int len, bool is_write)
2750 {
2751 if (is_write) {
2752 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2753 } else {
2754 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2755 }
2756 }
2757
2758 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2759 int len, int is_write)
2760 {
2761 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2762 buf, len, is_write);
2763 }
2764
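/* Illustrative sketch (not built): the legacy helper above is equivalent to
 * an address_space_rw() call on address_space_memory with unspecified
 * attributes, e.g. to read 16 bytes at a hypothetical guest-physical
 * address "gpa":
 *
 *     uint8_t buf[16];
 *
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);
 *     // ...is the same as:
 *     address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
 *                      buf, sizeof(buf), false);
 */
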
2765 enum write_rom_type {
2766 WRITE_DATA,
2767 FLUSH_CACHE,
2768 };
2769
2770 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2771 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2772 {
2773 hwaddr l;
2774 uint8_t *ptr;
2775 hwaddr addr1;
2776 MemoryRegion *mr;
2777
2778 rcu_read_lock();
2779 while (len > 0) {
2780 l = len;
2781 mr = address_space_translate(as, addr, &addr1, &l, true);
2782
2783 if (!(memory_region_is_ram(mr) ||
2784 memory_region_is_romd(mr))) {
2785 l = memory_access_size(mr, l, addr1);
2786 } else {
2787 addr1 += memory_region_get_ram_addr(mr);
2788 /* ROM/RAM case */
2789 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
2790 switch (type) {
2791 case WRITE_DATA:
2792 memcpy(ptr, buf, l);
2793 invalidate_and_set_dirty(mr, addr1, l);
2794 break;
2795 case FLUSH_CACHE:
2796 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2797 break;
2798 }
2799 }
2800 len -= l;
2801 buf += l;
2802 addr += l;
2803 }
2804 rcu_read_unlock();
2805 }
2806
2807 /* used for ROM loading: can write in RAM and ROM */
2808 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2809 const uint8_t *buf, int len)
2810 {
2811 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2812 }
2813
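/* Illustrative sketch (not built): ROM loaders use this path because a plain
 * address_space_write() to a read-only region would be discarded. Assuming a
 * hypothetical "blob"/"blob_size" buffer that should land at guest-physical
 * address 0x0 in the system address space:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x0,
 *                                   blob, blob_size);
 */
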
2814 void cpu_flush_icache_range(hwaddr start, int len)
2815 {
2816 /*
2817 * This function should do the same thing as an icache flush that was
2818 * triggered from within the guest. For TCG we are always cache coherent,
2819 * so there is no need to flush anything. For KVM / Xen we need to flush
2820 * the host's instruction cache at least.
2821 */
2822 if (tcg_enabled()) {
2823 return;
2824 }
2825
2826 cpu_physical_memory_write_rom_internal(&address_space_memory,
2827 start, NULL, len, FLUSH_CACHE);
2828 }
2829
2830 typedef struct {
2831 MemoryRegion *mr;
2832 void *buffer;
2833 hwaddr addr;
2834 hwaddr len;
2835 bool in_use;
2836 } BounceBuffer;
2837
2838 static BounceBuffer bounce;
2839
2840 typedef struct MapClient {
2841 QEMUBH *bh;
2842 QLIST_ENTRY(MapClient) link;
2843 } MapClient;
2844
2845 QemuMutex map_client_list_lock;
2846 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2847 = QLIST_HEAD_INITIALIZER(map_client_list);
2848
2849 static void cpu_unregister_map_client_do(MapClient *client)
2850 {
2851 QLIST_REMOVE(client, link);
2852 g_free(client);
2853 }
2854
2855 static void cpu_notify_map_clients_locked(void)
2856 {
2857 MapClient *client;
2858
2859 while (!QLIST_EMPTY(&map_client_list)) {
2860 client = QLIST_FIRST(&map_client_list);
2861 qemu_bh_schedule(client->bh);
2862 cpu_unregister_map_client_do(client);
2863 }
2864 }
2865
2866 void cpu_register_map_client(QEMUBH *bh)
2867 {
2868 MapClient *client = g_malloc(sizeof(*client));
2869
2870 qemu_mutex_lock(&map_client_list_lock);
2871 client->bh = bh;
2872 QLIST_INSERT_HEAD(&map_client_list, client, link);
2873 if (!atomic_read(&bounce.in_use)) {
2874 cpu_notify_map_clients_locked();
2875 }
2876 qemu_mutex_unlock(&map_client_list_lock);
2877 }
2878
2879 void cpu_exec_init_all(void)
2880 {
2881 qemu_mutex_init(&ram_list.mutex);
2882 io_mem_init();
2883 memory_map_init();
2884 qemu_mutex_init(&map_client_list_lock);
2885 }
2886
2887 void cpu_unregister_map_client(QEMUBH *bh)
2888 {
2889 MapClient *client;
2890
2891 qemu_mutex_lock(&map_client_list_lock);
2892 QLIST_FOREACH(client, &map_client_list, link) {
2893 if (client->bh == bh) {
2894 cpu_unregister_map_client_do(client);
2895 break;
2896 }
2897 }
2898 qemu_mutex_unlock(&map_client_list_lock);
2899 }
2900
2901 static void cpu_notify_map_clients(void)
2902 {
2903 qemu_mutex_lock(&map_client_list_lock);
2904 cpu_notify_map_clients_locked();
2905 qemu_mutex_unlock(&map_client_list_lock);
2906 }
2907
2908 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2909 {
2910 MemoryRegion *mr;
2911 hwaddr l, xlat;
2912
2913 rcu_read_lock();
2914 while (len > 0) {
2915 l = len;
2916 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2917 if (!memory_access_is_direct(mr, is_write)) {
2918 l = memory_access_size(mr, l, addr);
2919 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2920 return false;
2921 }
2922 }
2923
2924 len -= l;
2925 addr += l;
2926 }
2927 rcu_read_unlock();
2928 return true;
2929 }
2930
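/* Illustrative sketch (not built): probing whether a DMA target range is
 * backed by something that will accept a write of that size before touching
 * it, for a hypothetical "dma_addr"/"dma_len" pair:
 *
 *     if (!address_space_access_valid(&address_space_memory, dma_addr,
 *                                     dma_len, true)) {
 *         // report a DMA error instead of issuing the access
 *     }
 */
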
2931 /* Map a physical memory region into a host virtual address.
2932 * May map a subset of the requested range, given by and returned in *plen.
2933 * May return NULL if resources needed to perform the mapping are exhausted.
2934 * Use only for reads OR writes - not for read-modify-write operations.
2935 * Use cpu_register_map_client() to know when retrying the map operation is
2936 * likely to succeed.
2937 */
2938 void *address_space_map(AddressSpace *as,
2939 hwaddr addr,
2940 hwaddr *plen,
2941 bool is_write)
2942 {
2943 hwaddr len = *plen;
2944 hwaddr done = 0;
2945 hwaddr l, xlat, base;
2946 MemoryRegion *mr, *this_mr;
2947 ram_addr_t raddr;
2948 void *ptr;
2949
2950 if (len == 0) {
2951 return NULL;
2952 }
2953
2954 l = len;
2955 rcu_read_lock();
2956 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2957
2958 if (!memory_access_is_direct(mr, is_write)) {
2959 if (atomic_xchg(&bounce.in_use, true)) {
2960 rcu_read_unlock();
2961 return NULL;
2962 }
2963 /* Avoid unbounded allocations */
2964 l = MIN(l, TARGET_PAGE_SIZE);
2965 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2966 bounce.addr = addr;
2967 bounce.len = l;
2968
2969 memory_region_ref(mr);
2970 bounce.mr = mr;
2971 if (!is_write) {
2972 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2973 bounce.buffer, l);
2974 }
2975
2976 rcu_read_unlock();
2977 *plen = l;
2978 return bounce.buffer;
2979 }
2980
2981 base = xlat;
2982 raddr = memory_region_get_ram_addr(mr);
2983
2984 for (;;) {
2985 len -= l;
2986 addr += l;
2987 done += l;
2988 if (len == 0) {
2989 break;
2990 }
2991
2992 l = len;
2993 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2994 if (this_mr != mr || xlat != base + done) {
2995 break;
2996 }
2997 }
2998
2999 memory_region_ref(mr);
3000 *plen = done;
3001 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
3002 rcu_read_unlock();
3003
3004 return ptr;
3005 }
3006
3007 /* Unmaps a memory region previously mapped by address_space_map().
3008 * Will also mark the memory as dirty if is_write == 1. access_len gives
3009 * the amount of memory that was actually read or written by the caller.
3010 */
3011 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3012 int is_write, hwaddr access_len)
3013 {
3014 if (buffer != bounce.buffer) {
3015 MemoryRegion *mr;
3016 ram_addr_t addr1;
3017
3018 mr = qemu_ram_addr_from_host(buffer, &addr1);
3019 assert(mr != NULL);
3020 if (is_write) {
3021 invalidate_and_set_dirty(mr, addr1, access_len);
3022 }
3023 if (xen_enabled()) {
3024 xen_invalidate_map_cache_entry(buffer);
3025 }
3026 memory_region_unref(mr);
3027 return;
3028 }
3029 if (is_write) {
3030 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3031 bounce.buffer, access_len);
3032 }
3033 qemu_vfree(bounce.buffer);
3034 bounce.buffer = NULL;
3035 memory_region_unref(bounce.mr);
3036 atomic_mb_set(&bounce.in_use, false);
3037 cpu_notify_map_clients();
3038 }
3039
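/* Illustrative sketch (not built): the usual map/use/unmap pattern for
 * zero-copy access. If the translation is not direct RAM, the single bounce
 * buffer may already be busy and address_space_map() returns NULL; a real
 * caller would then register a map client (cpu_register_map_client) with a
 * QEMUBH and retry from the bottom half. "addr" and "want" are hypothetical:
 *
 *     hwaddr plen = want;
 *     void *p = address_space_map(&address_space_memory, addr, &plen, true);
 *
 *     if (p) {
 *         memset(p, 0, plen);                     // use at most plen bytes
 *         address_space_unmap(&address_space_memory, p, plen, 1, plen);
 *     }
 */
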
3040 void *cpu_physical_memory_map(hwaddr addr,
3041 hwaddr *plen,
3042 int is_write)
3043 {
3044 return address_space_map(&address_space_memory, addr, plen, is_write);
3045 }
3046
3047 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3048 int is_write, hwaddr access_len)
3049 {
3050 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3051 }
3052
3053 /* warning: addr must be aligned */
3054 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3055 MemTxAttrs attrs,
3056 MemTxResult *result,
3057 enum device_endian endian)
3058 {
3059 uint8_t *ptr;
3060 uint64_t val;
3061 MemoryRegion *mr;
3062 hwaddr l = 4;
3063 hwaddr addr1;
3064 MemTxResult r;
3065 bool release_lock = false;
3066
3067 rcu_read_lock();
3068 mr = address_space_translate(as, addr, &addr1, &l, false);
3069 if (l < 4 || !memory_access_is_direct(mr, false)) {
3070 release_lock |= prepare_mmio_access(mr);
3071
3072 /* I/O case */
3073 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
3074 #if defined(TARGET_WORDS_BIGENDIAN)
3075 if (endian == DEVICE_LITTLE_ENDIAN) {
3076 val = bswap32(val);
3077 }
3078 #else
3079 if (endian == DEVICE_BIG_ENDIAN) {
3080 val = bswap32(val);
3081 }
3082 #endif
3083 } else {
3084 /* RAM case */
3085 ptr = qemu_get_ram_ptr(mr->ram_block,
3086 (memory_region_get_ram_addr(mr)
3087 & TARGET_PAGE_MASK)
3088 + addr1);
3089 switch (endian) {
3090 case DEVICE_LITTLE_ENDIAN:
3091 val = ldl_le_p(ptr);
3092 break;
3093 case DEVICE_BIG_ENDIAN:
3094 val = ldl_be_p(ptr);
3095 break;
3096 default:
3097 val = ldl_p(ptr);
3098 break;
3099 }
3100 r = MEMTX_OK;
3101 }
3102 if (result) {
3103 *result = r;
3104 }
3105 if (release_lock) {
3106 qemu_mutex_unlock_iothread();
3107 }
3108 rcu_read_unlock();
3109 return val;
3110 }
3111
3112 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3113 MemTxAttrs attrs, MemTxResult *result)
3114 {
3115 return address_space_ldl_internal(as, addr, attrs, result,
3116 DEVICE_NATIVE_ENDIAN);
3117 }
3118
3119 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3120 MemTxAttrs attrs, MemTxResult *result)
3121 {
3122 return address_space_ldl_internal(as, addr, attrs, result,
3123 DEVICE_LITTLE_ENDIAN);
3124 }
3125
3126 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3127 MemTxAttrs attrs, MemTxResult *result)
3128 {
3129 return address_space_ldl_internal(as, addr, attrs, result,
3130 DEVICE_BIG_ENDIAN);
3131 }
3132
3133 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3134 {
3135 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3136 }
3137
3138 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3139 {
3140 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3141 }
3142
3143 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3144 {
3145 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3146 }
3147
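/* Illustrative sketch (not built): reading a 32-bit little-endian value from
 * guest memory and checking the transaction result explicitly, at a
 * hypothetical address "desc_addr":
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, desc_addr,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         // the load hit an unassigned or error-returning region
 *     }
 *
 * ldl_le_phys(&address_space_memory, desc_addr) is the same access with the
 * result discarded.
 */
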
3148 /* warning: addr must be aligned */
3149 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3150 MemTxAttrs attrs,
3151 MemTxResult *result,
3152 enum device_endian endian)
3153 {
3154 uint8_t *ptr;
3155 uint64_t val;
3156 MemoryRegion *mr;
3157 hwaddr l = 8;
3158 hwaddr addr1;
3159 MemTxResult r;
3160 bool release_lock = false;
3161
3162 rcu_read_lock();
3163 mr = address_space_translate(as, addr, &addr1, &l,
3164 false);
3165 if (l < 8 || !memory_access_is_direct(mr, false)) {
3166 release_lock |= prepare_mmio_access(mr);
3167
3168 /* I/O case */
3169 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3170 #if defined(TARGET_WORDS_BIGENDIAN)
3171 if (endian == DEVICE_LITTLE_ENDIAN) {
3172 val = bswap64(val);
3173 }
3174 #else
3175 if (endian == DEVICE_BIG_ENDIAN) {
3176 val = bswap64(val);
3177 }
3178 #endif
3179 } else {
3180 /* RAM case */
3181 ptr = qemu_get_ram_ptr(mr->ram_block,
3182 (memory_region_get_ram_addr(mr)
3183 & TARGET_PAGE_MASK)
3184 + addr1);
3185 switch (endian) {
3186 case DEVICE_LITTLE_ENDIAN:
3187 val = ldq_le_p(ptr);
3188 break;
3189 case DEVICE_BIG_ENDIAN:
3190 val = ldq_be_p(ptr);
3191 break;
3192 default:
3193 val = ldq_p(ptr);
3194 break;
3195 }
3196 r = MEMTX_OK;
3197 }
3198 if (result) {
3199 *result = r;
3200 }
3201 if (release_lock) {
3202 qemu_mutex_unlock_iothread();
3203 }
3204 rcu_read_unlock();
3205 return val;
3206 }
3207
3208 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3209 MemTxAttrs attrs, MemTxResult *result)
3210 {
3211 return address_space_ldq_internal(as, addr, attrs, result,
3212 DEVICE_NATIVE_ENDIAN);
3213 }
3214
3215 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3216 MemTxAttrs attrs, MemTxResult *result)
3217 {
3218 return address_space_ldq_internal(as, addr, attrs, result,
3219 DEVICE_LITTLE_ENDIAN);
3220 }
3221
3222 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3223 MemTxAttrs attrs, MemTxResult *result)
3224 {
3225 return address_space_ldq_internal(as, addr, attrs, result,
3226 DEVICE_BIG_ENDIAN);
3227 }
3228
3229 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3230 {
3231 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3232 }
3233
3234 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3235 {
3236 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3237 }
3238
3239 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3240 {
3241 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3242 }
3243
3244 /* XXX: optimize */
3245 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3246 MemTxAttrs attrs, MemTxResult *result)
3247 {
3248 uint8_t val;
3249 MemTxResult r;
3250
3251 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3252 if (result) {
3253 *result = r;
3254 }
3255 return val;
3256 }
3257
3258 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3259 {
3260 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3261 }
3262
3263 /* warning: addr must be aligned */
3264 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3265 hwaddr addr,
3266 MemTxAttrs attrs,
3267 MemTxResult *result,
3268 enum device_endian endian)
3269 {
3270 uint8_t *ptr;
3271 uint64_t val;
3272 MemoryRegion *mr;
3273 hwaddr l = 2;
3274 hwaddr addr1;
3275 MemTxResult r;
3276 bool release_lock = false;
3277
3278 rcu_read_lock();
3279 mr = address_space_translate(as, addr, &addr1, &l,
3280 false);
3281 if (l < 2 || !memory_access_is_direct(mr, false)) {
3282 release_lock |= prepare_mmio_access(mr);
3283
3284 /* I/O case */
3285 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3286 #if defined(TARGET_WORDS_BIGENDIAN)
3287 if (endian == DEVICE_LITTLE_ENDIAN) {
3288 val = bswap16(val);
3289 }
3290 #else
3291 if (endian == DEVICE_BIG_ENDIAN) {
3292 val = bswap16(val);
3293 }
3294 #endif
3295 } else {
3296 /* RAM case */
3297 ptr = qemu_get_ram_ptr(mr->ram_block,
3298 (memory_region_get_ram_addr(mr)
3299 & TARGET_PAGE_MASK)
3300 + addr1);
3301 switch (endian) {
3302 case DEVICE_LITTLE_ENDIAN:
3303 val = lduw_le_p(ptr);
3304 break;
3305 case DEVICE_BIG_ENDIAN:
3306 val = lduw_be_p(ptr);
3307 break;
3308 default:
3309 val = lduw_p(ptr);
3310 break;
3311 }
3312 r = MEMTX_OK;
3313 }
3314 if (result) {
3315 *result = r;
3316 }
3317 if (release_lock) {
3318 qemu_mutex_unlock_iothread();
3319 }
3320 rcu_read_unlock();
3321 return val;
3322 }
3323
3324 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3325 MemTxAttrs attrs, MemTxResult *result)
3326 {
3327 return address_space_lduw_internal(as, addr, attrs, result,
3328 DEVICE_NATIVE_ENDIAN);
3329 }
3330
3331 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3332 MemTxAttrs attrs, MemTxResult *result)
3333 {
3334 return address_space_lduw_internal(as, addr, attrs, result,
3335 DEVICE_LITTLE_ENDIAN);
3336 }
3337
3338 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3339 MemTxAttrs attrs, MemTxResult *result)
3340 {
3341 return address_space_lduw_internal(as, addr, attrs, result,
3342 DEVICE_BIG_ENDIAN);
3343 }
3344
3345 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3346 {
3347 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3348 }
3349
3350 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3351 {
3352 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3353 }
3354
3355 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3356 {
3357 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3358 }
3359
3360 /* warning: addr must be aligned. The RAM page is not marked as dirty
3361 and the code inside is not invalidated. It is useful if the dirty
3362 bits are used to track modified PTEs */
3363 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3364 MemTxAttrs attrs, MemTxResult *result)
3365 {
3366 uint8_t *ptr;
3367 MemoryRegion *mr;
3368 hwaddr l = 4;
3369 hwaddr addr1;
3370 MemTxResult r;
3371 uint8_t dirty_log_mask;
3372 bool release_lock = false;
3373
3374 rcu_read_lock();
3375 mr = address_space_translate(as, addr, &addr1, &l,
3376 true);
3377 if (l < 4 || !memory_access_is_direct(mr, true)) {
3378 release_lock |= prepare_mmio_access(mr);
3379
3380 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3381 } else {
3382 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3383 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3384 stl_p(ptr, val);
3385
3386 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3387 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3388 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
3389 r = MEMTX_OK;
3390 }
3391 if (result) {
3392 *result = r;
3393 }
3394 if (release_lock) {
3395 qemu_mutex_unlock_iothread();
3396 }
3397 rcu_read_unlock();
3398 }
3399
3400 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3401 {
3402 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3403 }
3404
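/* Illustrative sketch (not built): a target page-table walker updating an
 * accessed/dirty flag in a guest PTE can use the _notdirty variant so the
 * store skips the code-invalidation work a normal stl_phys() would do.
 * "pte_addr" and "new_pte" are hypothetical:
 *
 *     stl_phys_notdirty(&address_space_memory, pte_addr, new_pte);
 */
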
3405 /* warning: addr must be aligned */
3406 static inline void address_space_stl_internal(AddressSpace *as,
3407 hwaddr addr, uint32_t val,
3408 MemTxAttrs attrs,
3409 MemTxResult *result,
3410 enum device_endian endian)
3411 {
3412 uint8_t *ptr;
3413 MemoryRegion *mr;
3414 hwaddr l = 4;
3415 hwaddr addr1;
3416 MemTxResult r;
3417 bool release_lock = false;
3418
3419 rcu_read_lock();
3420 mr = address_space_translate(as, addr, &addr1, &l,
3421 true);
3422 if (l < 4 || !memory_access_is_direct(mr, true)) {
3423 release_lock |= prepare_mmio_access(mr);
3424
3425 #if defined(TARGET_WORDS_BIGENDIAN)
3426 if (endian == DEVICE_LITTLE_ENDIAN) {
3427 val = bswap32(val);
3428 }
3429 #else
3430 if (endian == DEVICE_BIG_ENDIAN) {
3431 val = bswap32(val);
3432 }
3433 #endif
3434 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3435 } else {
3436 /* RAM case */
3437 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3438 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3439 switch (endian) {
3440 case DEVICE_LITTLE_ENDIAN:
3441 stl_le_p(ptr, val);
3442 break;
3443 case DEVICE_BIG_ENDIAN:
3444 stl_be_p(ptr, val);
3445 break;
3446 default:
3447 stl_p(ptr, val);
3448 break;
3449 }
3450 invalidate_and_set_dirty(mr, addr1, 4);
3451 r = MEMTX_OK;
3452 }
3453 if (result) {
3454 *result = r;
3455 }
3456 if (release_lock) {
3457 qemu_mutex_unlock_iothread();
3458 }
3459 rcu_read_unlock();
3460 }
3461
3462 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3463 MemTxAttrs attrs, MemTxResult *result)
3464 {
3465 address_space_stl_internal(as, addr, val, attrs, result,
3466 DEVICE_NATIVE_ENDIAN);
3467 }
3468
3469 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3470 MemTxAttrs attrs, MemTxResult *result)
3471 {
3472 address_space_stl_internal(as, addr, val, attrs, result,
3473 DEVICE_LITTLE_ENDIAN);
3474 }
3475
3476 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3477 MemTxAttrs attrs, MemTxResult *result)
3478 {
3479 address_space_stl_internal(as, addr, val, attrs, result,
3480 DEVICE_BIG_ENDIAN);
3481 }
3482
3483 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3484 {
3485 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3486 }
3487
3488 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3489 {
3490 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3491 }
3492
3493 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3494 {
3495 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3496 }
3497
3498 /* XXX: optimize */
3499 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3500 MemTxAttrs attrs, MemTxResult *result)
3501 {
3502 uint8_t v = val;
3503 MemTxResult r;
3504
3505 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3506 if (result) {
3507 *result = r;
3508 }
3509 }
3510
3511 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3512 {
3513 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3514 }
3515
3516 /* warning: addr must be aligned */
3517 static inline void address_space_stw_internal(AddressSpace *as,
3518 hwaddr addr, uint32_t val,
3519 MemTxAttrs attrs,
3520 MemTxResult *result,
3521 enum device_endian endian)
3522 {
3523 uint8_t *ptr;
3524 MemoryRegion *mr;
3525 hwaddr l = 2;
3526 hwaddr addr1;
3527 MemTxResult r;
3528 bool release_lock = false;
3529
3530 rcu_read_lock();
3531 mr = address_space_translate(as, addr, &addr1, &l, true);
3532 if (l < 2 || !memory_access_is_direct(mr, true)) {
3533 release_lock |= prepare_mmio_access(mr);
3534
3535 #if defined(TARGET_WORDS_BIGENDIAN)
3536 if (endian == DEVICE_LITTLE_ENDIAN) {
3537 val = bswap16(val);
3538 }
3539 #else
3540 if (endian == DEVICE_BIG_ENDIAN) {
3541 val = bswap16(val);
3542 }
3543 #endif
3544 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3545 } else {
3546 /* RAM case */
3547 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3548 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3549 switch (endian) {
3550 case DEVICE_LITTLE_ENDIAN:
3551 stw_le_p(ptr, val);
3552 break;
3553 case DEVICE_BIG_ENDIAN:
3554 stw_be_p(ptr, val);
3555 break;
3556 default:
3557 stw_p(ptr, val);
3558 break;
3559 }
3560 invalidate_and_set_dirty(mr, addr1, 2);
3561 r = MEMTX_OK;
3562 }
3563 if (result) {
3564 *result = r;
3565 }
3566 if (release_lock) {
3567 qemu_mutex_unlock_iothread();
3568 }
3569 rcu_read_unlock();
3570 }
3571
3572 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3573 MemTxAttrs attrs, MemTxResult *result)
3574 {
3575 address_space_stw_internal(as, addr, val, attrs, result,
3576 DEVICE_NATIVE_ENDIAN);
3577 }
3578
3579 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3580 MemTxAttrs attrs, MemTxResult *result)
3581 {
3582 address_space_stw_internal(as, addr, val, attrs, result,
3583 DEVICE_LITTLE_ENDIAN);
3584 }
3585
3586 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3587 MemTxAttrs attrs, MemTxResult *result)
3588 {
3589 address_space_stw_internal(as, addr, val, attrs, result,
3590 DEVICE_BIG_ENDIAN);
3591 }
3592
3593 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3594 {
3595 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3596 }
3597
3598 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3599 {
3600 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3601 }
3602
3603 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3604 {
3605 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3606 }
3607
3608 /* XXX: optimize */
3609 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3610 MemTxAttrs attrs, MemTxResult *result)
3611 {
3612 MemTxResult r;
3613 val = tswap64(val);
3614 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3615 if (result) {
3616 *result = r;
3617 }
3618 }
3619
3620 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3621 MemTxAttrs attrs, MemTxResult *result)
3622 {
3623 MemTxResult r;
3624 val = cpu_to_le64(val);
3625 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3626 if (result) {
3627 *result = r;
3628 }
3629 }
3630 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3631 MemTxAttrs attrs, MemTxResult *result)
3632 {
3633 MemTxResult r;
3634 val = cpu_to_be64(val);
3635 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3636 if (result) {
3637 *result = r;
3638 }
3639 }
3640
3641 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3642 {
3643 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3644 }
3645
3646 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3647 {
3648 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3649 }
3650
3651 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3652 {
3653 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3654 }
3655
3656 /* virtual memory access for debug (includes writing to ROM) */
3657 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3658 uint8_t *buf, int len, int is_write)
3659 {
3660 int l;
3661 hwaddr phys_addr;
3662 target_ulong page;
3663
3664 while (len > 0) {
3665 int asidx;
3666 MemTxAttrs attrs;
3667
3668 page = addr & TARGET_PAGE_MASK;
3669 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3670 asidx = cpu_asidx_from_attrs(cpu, attrs);
3671 /* if no physical page mapped, return an error */
3672 if (phys_addr == -1)
3673 return -1;
3674 l = (page + TARGET_PAGE_SIZE) - addr;
3675 if (l > len)
3676 l = len;
3677 phys_addr += (addr & ~TARGET_PAGE_MASK);
3678 if (is_write) {
3679 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3680 phys_addr, buf, l);
3681 } else {
3682 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3683 MEMTXATTRS_UNSPECIFIED,
3684 buf, l, 0);
3685 }
3686 len -= l;
3687 buf += l;
3688 addr += l;
3689 }
3690 return 0;
3691 }
3692
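/* Illustrative sketch (not built): this is the path the gdbstub and monitor
 * use to peek at guest virtual memory. For instance, dumping 64 bytes at a
 * hypothetical guest virtual address "vaddr" of some CPUState *cpu:
 *
 *     uint8_t buf[64];
 *
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // some page in the range was not mapped
 *     }
 */
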
3693 /*
3694 * Allows code that needs to deal with migration bitmaps etc to still be built
3695 * target independent.
3696 */
3697 size_t qemu_target_page_bits(void)
3698 {
3699 return TARGET_PAGE_BITS;
3700 }
3701
3702 #endif
3703
3704 /*
3705 * A helper function for the _utterly broken_ virtio device model to find out if
3706 * it's running on a big endian machine. Don't do this at home kids!
3707 */
3708 bool target_words_bigendian(void);
3709 bool target_words_bigendian(void)
3710 {
3711 #if defined(TARGET_WORDS_BIGENDIAN)
3712 return true;
3713 #else
3714 return false;
3715 #endif
3716 }
3717
3718 #ifndef CONFIG_USER_ONLY
3719 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3720 {
3721 MemoryRegion *mr;
3722 hwaddr l = 1;
3723 bool res;
3724
3725 rcu_read_lock();
3726 mr = address_space_translate(&address_space_memory,
3727 phys_addr, &phys_addr, &l, false);
3728
3729 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3730 rcu_read_unlock();
3731 return res;
3732 }
3733
3734 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3735 {
3736 RAMBlock *block;
3737 int ret = 0;
3738
3739 rcu_read_lock();
3740 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3741 ret = func(block->idstr, block->host, block->offset,
3742 block->used_length, opaque);
3743 if (ret) {
3744 break;
3745 }
3746 }
3747 rcu_read_unlock();
3748 return ret;
3749 }
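
/* Illustrative sketch (not built): a RAMBlockIterFunc that just totals the
 * used size of all blocks; returning non-zero from the callback would stop
 * the walk early. The names below are hypothetical:
 *
 *     static int count_ram(const char *block_name, void *host_addr,
 *                          ram_addr_t offset, ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */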
3750 #endif