/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "memory.h"
#include "exec-memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

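/*
 * Example: intersecting [0x1000, size 0x2000) with [0x2000, size 0x2000)
 * gives start = 0x2000 and inclusive end = MIN(0x2fff, 0x3fff) = 0x2fff,
 * i.e. [0x2000, size 0x1000).  Working with inclusive end values keeps
 * start + size from wrapping when a range reaches the top of the 64-bit
 * address space.  (Callers must ensure the ranges actually intersect.)
 */
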
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

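/*
 * Example: two ranges over the same region, [0x0000, 0x1000) at
 * offset_in_region 0 and [0x1000, 0x1000) at offset_in_region 0x1000,
 * with equal dirty masks, satisfy can_merge() and collapse into a
 * single [0x0000, 0x2000) range.
 */
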
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

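/*
 * Example (illustrative): a root container holding RAM at [0x0, 0x4000)
 * plus an MMIO region added with memory_region_add_subregion_overlap()
 * at [0x2000, 0x3000), priority 1, flattens to three disjoint ranges:
 *
 *   [0x0000, 0x2000) -> ram,  offset_in_region 0x0000
 *   [0x2000, 0x3000) -> mmio, offset_in_region 0x0000
 *   [0x3000, 0x4000) -> ram,  offset_in_region 0x3000
 *
 * Higher-priority subregions are rendered first, so they obscure the
 * ranges beneath them.
 */
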
static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                cpu_physical_log_stop(frnew->addr.start, frnew->addr.size);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                cpu_physical_log_start(frnew->addr.start, frnew->addr.size);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             frnew->dirty_log_mask);
            ++inew;
        }
    }

    current_memory_map = new_view;
    flatview_destroy(&old_view);
}

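/*
 * Example walk: with old = {A, B} and new = {A, C} where B starts below
 * C, the merge visits A (in both; only logging is adjusted), then B
 * (old only; replaced with IO_MEM_UNASSIGNED), then C (new only;
 * registered).  Since both views are sorted, one linear pass suffices.
 */
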
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as "all access sizes valid", for compatibility */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

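/*
 * Example .valid settings (hypothetical device): a register file
 * accepting only aligned 2- and 4-byte accesses would declare
 *
 *     .valid = {
 *         .min_access_size = 2,
 *         .max_access_size = 4,
 *         .unaligned = false,
 *     },
 *
 * rejecting a 1-byte access outright and a 4-byte access at offset 2
 * for being unaligned.
 */
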
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

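/*
 * Example: a 4-byte read from a device whose impl.max_access_size is 2
 * is split into two 2-byte reads; access_mask is 0xffff and the pieces
 * land at bit offsets 0 and 16:
 *
 *     data = (read(addr, 2) & 0xffff)
 *          | ((read(addr + 2, 2) & 0xffff) << 16);
 *
 * This composes little-endian only; see the FIXME above.
 */
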
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}

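/*
 * Usage sketch (hypothetical device; callback signatures assumed from
 * MemoryRegionOps in memory.h):
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         return 0;
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *     };
 *
 *     memory_region_init_io(&mydev_mr, &mydev_ops, NULL, "mydev", 0x1000);
 */
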
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

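/*
 * Usage sketch (illustrative): expose the first 128KB of a RAM region
 * a second time through a legacy window:
 *
 *     memory_region_init_alias(&window, "ram-window", &ram, 0, 0x20000);
 *     memory_region_add_subregion(sysmem, 0xa0000, &window);
 */
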
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

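/*
 * Example (illustrative): layering a higher-priority region over RAM:
 *
 *     memory_region_add_subregion(sysmem, 0, &ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, &vga_mem, 1);
 *
 * Both regions cover 0xa0000, but the flattened view dispatches
 * accesses there to vga_mem; deleting vga_mem re-exposes the RAM
 * underneath.
 */
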
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    root_memory_region = mr;
    memory_region_update_topology();
}
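
/*
 * Board-level usage sketch (illustrative):
 *
 *     static MemoryRegion sysmem, ram;
 *
 *     memory_region_init(&sysmem, "system", UINT64_MAX);
 *     set_system_memory_map(&sysmem);
 *     memory_region_init_ram(&ram, NULL, "ram", ram_size);
 *     memory_region_add_subregion(&sysmem, 0, &ram);
 *
 * Installing the root first matters: every subregion change triggers
 * memory_region_update_topology(), which renders from
 * root_memory_region.
 */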