/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Author: Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "memory.h"
#include "exec-memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

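/*
 * Illustrative example: with r1 = [4, 8) and r2 = [6, 10), r2.start lies
 * inside r1, so the second clause fires and the ranges intersect.  Two
 * ranges overlap exactly when one of them starts inside the other.
 */
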
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

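/*
 * Worked example of the off-by-one trick (illustrative): for a range with
 * start = 1 and size = UINT64_MAX, start + size wraps to 0, but the
 * inclusive end (start + size - 1) is UINT64_MAX and does not wrap.
 * Comparing inclusive ends and converting back with end - start + 1 stays
 * correct even for ranges touching the top of the 64-bit space.
 */
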
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

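/*
 * Illustrative example: two flat ranges [0x1000, 0x2000) and
 * [0x2000, 0x3000) backed by the same region at consecutive offsets, with
 * identical dirty logging, collapse into the single range [0x1000, 0x3000);
 * can_merge() rejects pairs that differ in backing region, offset
 * continuity, or dirty_log_mask.
 */
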
static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

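/*
 * Illustrative example for the offset juggling above: for RAM the low bits
 * of phys_offset encode an io index no larger than IO_MEM_ROM, so the
 * region offset is folded into phys_offset and region_offset is cleared;
 * for MMIO, phys_offset keeps the io index unchanged and region_offset is
 * passed through to the device callbacks.
 */
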
static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

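/*
 * Illustrative walk-through: rendering a container with a priority-1
 * subregion at [0x0, 0x1000) and a priority-0 subregion at [0x0, 0x2000)
 * visits the higher-priority child first, so it claims [0x0, 0x1000); the
 * second child then only fills the remaining gap [0x1000, 0x2000), since
 * ranges already present in @view obscure anything rendered later.
 */
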
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }

    as->current_map = new_view;
    flatview_destroy(&old_view);
}

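/*
 * Illustrative example: with old = {X} and new = {X, Y}, the walk first
 * sees X in both views and only adjusts dirty logging if the mask changed;
 * Y exists only in the new view, so it falls through to range_add().  A
 * range present only in the old view is torn down with range_del() instead.
 */
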
static void memory_region_update_topology(void)
{
    address_space_update_topology(&address_space_memory);
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "all sizes valid", for compatibility */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

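/*
 * Illustrative example: a 4-byte read from a device whose
 * ->impl.max_access_size is 2 is split into two 2-byte ->read() calls at
 * addr and addr + 2, each masked to 16 bits and recombined little-endian
 * into the 32-bit result (hence the big-endian FIXME above).
 */
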
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

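/*
 * Example of the mask update above: for client == 1 and log == true,
 * mask is 0x02, so bit 1 of dirty_log_mask is set while all other
 * clients' bits are preserved; with log == false, log * mask is 0 and
 * just that bit is cleared.
 */
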
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    AddrRange tmp;
    CoalescedMemoryRange *cmr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

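/*
 * Illustrative example: a coalesced range at region-relative [0x10, 0x20)
 * inside a flat range whose guest-physical start is 0x8000 (with
 * offset_in_region 0) is shifted by 0x8000 and registered as
 * [0x8010, 0x8020); parts falling outside the visible flat range are
 * clipped away first.
 */
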
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

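/*
 * Illustrative example: adding subregions with priorities 0, 2 and 1 (in
 * that order) leaves the list ordered 2, 1, 0; each new subregion is
 * inserted before the first existing entry whose priority it meets or
 * exceeds, falling through to the tail otherwise.
 */
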
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}