/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

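/*
 * Worked example: intersecting [0x1000, 0x3000) with [0x2000, 0x4000)
 * yields start == 0x2000 and end == 0x2fff (inclusive), i.e. a range with
 * start 0x2000 and size 0x1000.  The inclusive-end arithmetic keeps
 * addrrange_end() - 1 from wrapping when a range extends to the very top
 * of the 64-bit address space.
 */
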
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

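/*
 * Usage sketch, mirroring the call sites later in this file:
 *
 *     FlatRange *fr;
 *
 *     FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
 *         ... examine *fr ...
 *     }
 */
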
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

/* Two flat ranges can merge if they are adjacent both in guest address
 * space and in region offset, refer to the same region, and share the
 * same dirty logging state.
 */
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

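/*
 * For example, two flat ranges covering [0x0, 0x1000) and [0x1000, 0x2000)
 * that map consecutive offsets of the same region with the same dirty
 * logging state collapse into a single range covering [0x0, 0x2000).
 */
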
static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset - mrp->offset);
        }
        return;
    }
    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset - mrp->offset, data);
        }
        return;
    }
    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

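/*
 * Alias arithmetic example: an alias placed at 0xe0000 whose target sits
 * at 0x0 with alias_offset 0x20000 gives base = 0xe0000 - 0x0 - 0x20000
 * = 0xc0000 before recursing; the recursion then re-adds the target's
 * address, so target offset 0x20000 is rendered exactly at 0xe0000, with
 * @clip restricting output to the alias window.
 */
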
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed). */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }

    as->current_map = new_view;
    flatview_destroy(&old_view);
}

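/*
 * For example, if the old view is [A][B][C] and the new view is [A][B'][C]
 * where B' differs from B only in dirty_log_mask, the walk above is silent
 * for A and C and emits log_start/log_stop for B'; if B' instead moved or
 * changed region, it emits range_del(B) followed by range_add(B').
 */
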
static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }

    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "any size valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

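/*
 * Worked example: a 4-byte read from a region declaring
 * .impl.max_access_size = 2 is split into two 2-byte reads with
 * access_mask == 0xffff, recombined as
 *
 *     data = (read(addr) & 0xffff) | ((read(addr + 2) & 0xffff) << 16);
 */
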
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

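/*
 * Minimal usage sketch (hypothetical device, for illustration only):
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         return 0;   /+ device register read +/
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 */
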
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

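/*
 * The mask arithmetic above relies on @log being 0 or 1: for client == 2,
 * mask == 0x04, so (dirty_log_mask & ~mask) clears bit 2 and (log * mask)
 * sets it again only if logging is enabled, leaving the other clients'
 * bits untouched.
 */
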
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

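/*
 * E.g. a ROM window added at priority 1 over RAM at priority 0 is rendered
 * first and obscures the RAM wherever the two overlap; the RAM shows
 * through only in the gaps left by the higher-priority region.
 */
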
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}

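/*
 * Board-level usage sketch (hypothetical names, for illustration only):
 *
 *     MemoryRegion *sysmem = qemu_malloc(sizeof(*sysmem));
 *     MemoryRegion *ram = qemu_malloc(sizeof(*ram));
 *
 *     memory_region_init(sysmem, "system", UINT64_MAX);
 *     memory_region_init_ram(ram, NULL, "ram", ram_size);
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     set_system_memory_map(sysmem);
 */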