/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

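/* Worked example (illustrative): intersecting [0x1000, size 0x2000) with
 * [0x2000, size 0x2000) gives start = MAX(0x1000, 0x2000) = 0x2000 and
 * inclusive end = MIN(0x2fff, 0x3fff) = 0x2fff, so the result is
 * [0x2000, size 0x1000).  Computing with inclusive ends avoids overflow
 * when a range extends to the very top of the 64-bit address space, where
 * start + size would wrap around to zero.
 */
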
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

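/* memory_region_ioeventfd_before() below defines a total order on ioeventfds
 * (by address, then size, then match_data, then data, then fd).  The
 * per-region and per-address-space ioeventfd arrays are kept sorted in this
 * order, which allows address_space_add_del_ioeventfds() to diff them in a
 * single merge pass.
 */
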
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (a.addr.start < b.addr.start) {
        return true;
    } else if (a.addr.start > b.addr.start) {
        return false;
    } else if (a.addr.size < b.addr.size) {
        return true;
    } else if (a.addr.size > b.addr.size) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

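/* Usage (illustrative; see memory_region_sync_dirty_bitmap() below for a
 * real user):
 *
 *     FlatRange *fr;
 *
 *     FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
 *         ...
 *     }
 */
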
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

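/* Two flat ranges can be merged when they are adjacent in the address space,
 * refer to the same region at contiguous offsets, and carry the same dirty
 * logging mask.
 */
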
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

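/* Each address space starts out with a NULL ->root; the roots are wired up
 * later via set_system_memory_map() and set_system_io_map(), and
 * memory_region_update_topology() skips address spaces whose root is still
 * unset.
 */
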
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset - mrp->offset);
        }
        return;
    }
    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset - mrp->offset, data);
        }
        return;
    }
    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

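/* Illustrative example: a 16KB container holding a RAM subregion that covers
 * the whole container at priority 0, plus an MMIO subregion at offset 0x1000
 * of size 0x1000 added with memory_region_add_subregion_overlap() at
 * priority 1, renders to three FlatRanges: RAM [0x0000, 0x1000), MMIO
 * [0x1000, 0x2000), and RAM [0x2000, 0x4000) with offset_in_region 0x2000.
 */
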
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = 0;
    inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  fr->addr.start - fr->offset_in_region);
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = qemu_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    qemu_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = 0;
    inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "any access size valid", for
     * compatibility.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

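/* Example (illustrative): a device declaring .valid.min_access_size = 2 and
 * .valid.max_access_size = 4 accepts 2- and 4-byte accesses but rejects
 * 1-byte and 8-byte ones; with .valid.unaligned left false, accesses whose
 * address is not a multiple of their size are rejected as well.
 */
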
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

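/* Example (illustrative): a 4-byte read from a device whose
 * .impl.max_access_size is 2 is split into two 2-byte reads at addr and
 * addr + 2, and the results are assembled little-endian:
 *
 *     data = (read(addr) & 0xffff) | ((read(addr + 2) & 0xffff) << 16);
 *
 * (hence the FIXME about big-endian support above).
 */
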
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

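/* Typical device-side use (an illustrative sketch; the mydev_* names are
 * hypothetical, not part of this API):
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(parent_mr, base_addr, &s->mmio);
 */
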
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
    qemu_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

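/* Coalesced MMIO lets KVM buffer writes to the registered ranges and deliver
 * them in batches instead of exiting to userspace on every access.  Because
 * the kernel works with absolute addresses, the region-relative coalesced
 * ranges must be re-registered from the flat view, which is what the
 * function above does.
 */
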
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = qemu_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = qemu_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

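/* memory_region_add_subregion() is for the common non-overlapping layout;
 * when regions intentionally overlap (e.g. one region shadowing part of
 * another), memory_region_add_subregion_overlap() must be used instead: it
 * suppresses the collision warning in memory_region_add_subregion_common()
 * and makes the rendering order explicit via the priority argument.
 */
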
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}