/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
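/*
 * Worked example (illustrative comment, not in the original file): for
 * r1 = [0x1000, 0x3000) and r2 = [0x2000, 0x4000), addrrange_intersects()
 * is true because r2.start falls inside r1, and addrrange_intersection()
 * yields start = max(0x1000, 0x2000) = 0x2000 and
 * end = min(0x3000, 0x4000) = 0x3000, i.e. the range [0x2000, 0x3000).
 * The intersection is only meaningful when the ranges actually overlap;
 * otherwise the computed size would come out negative.
 */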
enum ListenerDirection { Forward, Reverse };
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
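/*
 * Illustrative sketch (comment only, not part of the original file): the
 * macros above fan callbacks out to registered MemoryListeners.  A
 * hypothetical accelerator would hook in roughly like this:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // map s->offset_within_address_space .. + int128_get64(s->size)
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * memory_listener_register() and address_space_memory are real QEMU
 * symbols; my_region_add/my_listener are made-up names for illustration.
 */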
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e)))) {
        return true;
    }

    return false;
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}
static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
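/*
 * Worked example (illustrative comment): a 4KiB device region placed at
 * offset 0x1000 inside an enabled container renders as a single FlatRange
 * with addr.start = container base + 0x1000.  If a higher-priority
 * sibling already occupies [0x1000, 0x1800), only the remainder
 * [0x1800, 0x2000) is inserted, with fr.offset_in_region advanced by
 * 0x800 to keep the region-relative offset consistent.
 */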
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding
                         * an alias down the way.  This will also let us
                         * share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    }
    g_hash_table_replace(flat_views, NULL, empty_view);
    flatview_ref(empty_view);
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: rejected\n",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: unaligned\n",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat zero as compatibility all valid */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: invalid size "
                                       "(min:%u max:%u)\n",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }
    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
}

void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      bool readonly,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = readonly;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
                                             readonly, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
                                           false, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}
const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
                             memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }

    if (tcg_enabled() && rb) {
        /* TCG only cares about dirty memory logging for RAM, not IOMMU.  */
        mask |= (1 << DIRTY_MEMORY_CODE);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    if (imrc->iommu_set_page_size_mask) {
        ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
    }
    return ret;
}
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event)
{
    IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_mapped(mr) || !memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}

void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm)
{
    g_assert(memory_region_is_ram(mr) && !memory_region_is_mapped(mr));
    g_assert(!rdm || !mr->rdm);
    mr->rdm = rdm;
}

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->get_min_granularity);
    return rdmc->get_min_granularity(rdm, mr);
}

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->is_populated);
    return rdmc->is_populated(rdm, section);
}

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_populated);
    return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->register_listener);
    rdmc->register_listener(rdm, rdl, section);
}

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->unregister_listener);
    rdmc->unregister_listener(rdm, rdl);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}
/*
 * If memory region `mr' is NULL, do global sync.  Otherwise, sync
 * dirty bitmap for the specified memory region.
 */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_sync) {
            as = listener->address_space;
            view = address_space_get_flatview(as);
            FOR_EACH_FLAT_RANGE(fr, view) {
                if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                    MemoryRegionSection mrs = section_from_flat_range(fr, view);
                    listener->log_sync(listener, &mrs);
                }
            }
            flatview_unref(view);
        } else if (listener->log_sync_global) {
            /*
             * No matter whether MR is specified, what we can do here
             * is to do a global sync, because we are not capable to
             * sync in a finer granularity.
             */
            listener->log_sync_global(listener);
        }
    }
}
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * Might need to be extended to cover other types of memory regions.
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}
/*
 * Call proper memory listeners about the change on the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
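/*
 * Usage sketch (illustrative comment): virtio-style devices bind a
 * doorbell register to an EventNotifier so the guest write is handled
 * without exiting into the device model:
 *
 *     EventNotifier notifier;
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mr, DOORBELL_OFFSET, 2, true, vq_index,
 *                               &notifier);
 *
 * DOORBELL_OFFSET and vq_index are hypothetical; with match_data = true
 * the notifier only fires when exactly vq_index is written with a
 * 2-byte access.
 */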
2492 static void memory_region_update_container_subregions(MemoryRegion
*subregion
)
2494 MemoryRegion
*mr
= subregion
->container
;
2495 MemoryRegion
*other
;
2497 memory_region_transaction_begin();
2499 memory_region_ref(subregion
);
2500 QTAILQ_FOREACH(other
, &mr
->subregions
, subregions_link
) {
2501 if (subregion
->priority
>= other
->priority
) {
2502 QTAILQ_INSERT_BEFORE(other
, subregion
, subregions_link
);
2506 QTAILQ_INSERT_TAIL(&mr
->subregions
, subregion
, subregions_link
);
2508 memory_region_update_pending
|= mr
->enabled
&& subregion
->enabled
;
2509 memory_region_transaction_commit();
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
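
/*
 * Illustrative sketch: several topology updates can be batched so the
 * flat views are rebuilt only once. A hypothetical device swapping two
 * banked regions might do:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_enabled(bank1, true);
 *     memory_region_transaction_commit();
 *
 * The nested begin/commit pairs inside memory_region_set_enabled() only
 * adjust the transaction depth, so listeners run once, at the outer
 * commit.
 */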
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}
static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
{
    MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);

    *tmp = *s;
    if (tmp->mr) {
        memory_region_ref(tmp->mr);
    }
    if (tmp->fv) {
        bool ret  = flatview_ref(tmp->fv);

        g_assert(ret);
    }
    return tmp;
}
void memory_region_section_free_copy(MemoryRegionSection *s)
{
    if (s->fv) {
        flatview_unref(s->fv);
    }
    if (s->mr) {
        memory_region_unref(s->mr);
    }
    g_free(s);
}
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}
void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}
static VMChangeStateEntry *vmstate_change;
void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void memory_vm_change_state_handler(void *opaque, bool running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}
void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
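
/*
 * Illustrative sketch: migration-style use of the global dirty log.
 * A hypothetical RAM save loop looks roughly like:
 *
 *     memory_global_dirty_log_start();
 *     while (more_passes) {
 *         memory_global_dirty_log_sync();      // pull bitmaps from KVM/TCG
 *         memory_global_after_dirty_log_sync();
 *         // ... walk and send pages marked in DIRTY_MEMORY_MIGRATION ...
 *     }
 *     memory_global_dirty_log_stop();
 *
 * Note that when the VM is not running, the stop is deferred via a VM
 * change state handler until the VM next resumes.
 */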
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    /* Only one of them can be defined for a listener */
    assert(!(listener->log_sync && listener->log_sync_global));

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}
static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                        "-" TARGET_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};
static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace *, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace *, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

        if (ac->has_memory) {
            fvi.ac = ac;
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
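
/*
 * Illustrative note: this is the workhorse behind the HMP monitor
 * command "info mtree"; e.g. "info mtree -f" corresponds to
 *
 *     mtree_info(true, false, false, false);
 *
 * which prints the rendered FlatViews instead of the MemoryRegion tree.
 */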
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
/*
 * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr)
{
}
#endif
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};
static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};
static const TypeInfo ram_discard_manager_info = {
    .parent             = TYPE_INTERFACE,
    .name               = TYPE_RAM_DISCARD_MANAGER,
    .class_size         = sizeof(RamDiscardManagerClass),
};
static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)