/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

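/* A range within a memory region for which MMIO coalescing has been
 * requested; offsets are relative to the start of the region.
 */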
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

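/* Iterate, in order, over every FlatRange in a FlatView. */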
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

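/* Two FlatRanges can be merged when they belong to the same region, are
 * contiguous both in guest physical address space and within that region,
 * and have the same dirty logging state.
 */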
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

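/* The single global memory address space; its root region is installed by
 * set_system_memory_map().
 */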
static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
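/* The region is first clipped against @clip; aliases are followed, and
 * subregions are rendered (higher priority first) before the region's own
 * contents fill whatever gaps remain in the view.
 */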
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
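    /* Both views are sorted by address, so a single linear pass over each
     * suffices: at every step, process whichever range starts first.
     */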
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    as->current_map = new_view;
    flatview_destroy(&old_view);
}

static void memory_region_update_topology(void)
{
    address_space_update_topology(&address_space_memory);
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as meaning no restriction, for
     * compatibility.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

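/* Perform a read of @size bytes by splitting it into one or more accesses
 * of a size the device implementation accepts (ops->impl.min/max_access_size)
 * and reassembling the result.
 */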
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

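/* Fixed-size (byte/word/long) wrappers adapting MemoryRegionOps to the
 * CPUReadMemoryFunc/CPUWriteMemoryFunc callback arrays expected by
 * cpu_register_io_memory().
 */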
static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

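/* Enable or disable dirty logging for the given client bit in the region's
 * dirty_log_mask, then regenerate the memory topology so the change
 * propagates to the flat view.
 */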
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

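/* Re-register the region's coalesced MMIO ranges for every flat range backed
 * by this region, translating region-relative offsets into absolute
 * addresses.
 */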
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

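/* Link @subregion into @mr at @offset, warning about unexpected overlaps.
 * The subregion list is kept sorted by descending priority, so that
 * higher-priority subregions are rendered first and obscure lower ones.
 */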
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->addr,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

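/* Install @mr as the root of the global memory address space and rebuild
 * the flattened view.
 */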
void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}