/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
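/* Worked example (editorial note, not part of the original file): for
 * r1 = [0x1000, +0x1000) and r2 = [0x1800, +0x1000), start becomes
 * max(0x1000, 0x1800) = 0x1800 and end becomes min(0x2000, 0x2800) =
 * 0x2000, so the intersection is [0x1800, +0x800).  All arithmetic is
 * done in Int128, so a range that ends exactly at 2^64 cannot overflow.
 */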
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return listener->address_space == section->address_space;
}
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(callback, dir, &mrs, ##_args);             \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}
static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}
static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
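/* Illustrative sketch (not part of the original file): two FlatRanges are
 * merged only when they are contiguous both in guest physical space and
 * within the backing MemoryRegion.  For example,
 *
 *   ranges[0] = { .mr = ram, .offset_in_region = 0x0,    .addr = [0x1000, +0x1000) }
 *   ranges[1] = { .mr = ram, .offset_in_region = 0x1000, .addr = [0x2000, +0x1000) }
 *
 * collapse into a single { .mr = ram, .offset_in_region = 0x0,
 * .addr = [0x1000, +0x2000) } entry, while neighbours that differ in
 * dirty_log_mask, romd_mode or readonly are left alone.
 */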
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}
static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
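/* Worked example (editorial sketch, not part of the original file): an
 * 8-byte little-endian read from a device whose ops->impl.max_access_size
 * is 4 is split into two 4-byte calls.  With access_size = 4 and
 * access_mask = 0xffffffff, the loop issues
 *
 *     access(mr, addr + 0, value, 4, 0,  access_mask, attrs);
 *     access(mr, addr + 4, value, 4, 32, access_mask, attrs);
 *
 * and each accessor ORs (tmp & mask) << shift into *value to reassemble
 * the full result.  On a big-endian target the shifts run in the reverse
 * order, (size - access_size - i) * 8.
 */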
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
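/* Usage sketch (illustrative, not part of the original file): callers batch
 * several topology changes so that listeners see one begin/commit cycle
 * instead of one full flat-view rebuild per change:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x40000000);
 *     memory_region_transaction_commit();
 *
 * Nothing is re-rendered until the outermost commit brings
 * memory_region_transaction_depth back to zero.
 */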
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
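/* Worked example (editorial note, not part of the original file): the QOM
 * special characters '/', '[', ']' and '\' are rewritten as "\xNN", so the
 * name "pci/mem[0]" becomes "pci\x2fmem\x5b0\x5d" before it is used as a
 * child property name.
 */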
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}
static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}
static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
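/* Usage sketch (illustrative; the device state "s" and the ops table
 * my_mmio_ops are hypothetical, not part of the original file):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_mmio_ops, s,
 *                           "my-device-mmio", 0x1000);
 *     memory_region_add_subregion(system_memory, 0xfeb00000, &s->iomem);
 *
 * Guest accesses that land in the region are then routed through
 * memory_region_dispatch_read/write above into the my_mmio_ops callbacks.
 */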
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&mr->iommu_notify);
    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != mr->iommu_notify_flags &&
        mr->iommu_ops->notify_flag_changed) {
        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
                                           flags);
    }

    mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(mr);
}
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                                bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    QLIST_REMOVE(n, node);
    memory_region_update_iommu_notify_flags(mr);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;
    IOMMUNotifierFlag request_flags;

    assert(memory_region_is_iommu(mr));

    if (entry.perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        if (iommu_notifier->notifier_flags & request_flags) {
            iommu_notifier->notify(iommu_notifier, &entry);
        }
    }
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
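/* Usage sketch (illustrative; the DOORBELL_OFFSET constant and the
 * doorbell_notifier field are hypothetical, and EventNotifier setup is
 * elided).  A virtio-style doorbell that fires on any write of the value 1
 * to a 2-byte register:
 *
 *     memory_region_add_eventfd(&s->iomem, DOORBELL_OFFSET, 2,
 *                               true, 1, &s->doorbell_notifier);
 *
 * With KVM's ioeventfd support the write completes entirely in the kernel;
 * otherwise memory_region_dispatch_write_eventfds() above signals the
 * notifier from userspace.
 */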
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
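/* Usage sketch (illustrative, not part of the original file): overlapping
 * subregions are resolved by priority, so a small MMIO window can punch a
 * hole in a large RAM region:
 *
 *     memory_region_add_subregion(sysmem, 0, ram);                // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000000,
 *                                         mmio_window, 1);        // prio 1
 *
 * render_memory_region() visits subregions in priority order, so the
 * window obscures the RAM pages underneath it in the flat view.
 */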
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
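/* Usage sketch (illustrative, not part of the original file): callers must
 * drop the reference that memory_region_find() takes on the returned
 * region:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */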
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            MemoryRegionSection mrs = section_from_flat_range(fr, as);
            listener->log_sync(listener, &mrs);
        }
        flatview_unref(view);
    }
}
void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    listener_add_address_space(listener, as);
}
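/* Usage sketch (illustrative; my_region_add is a hypothetical callback):
 * a listener that observes every range of the address space it is
 * registered against, including ranges already present at registration
 * time, since listener_add_address_space() replays the current flat view:
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 */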
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
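/* Usage sketch (illustrative, not part of the original file): a device that
 * needs its own view of memory, e.g. for DMA routed through an alias or
 * IOMMU region, builds and tears it down like this:
 *
 *     AddressSpace dma_as;
 *     address_space_init(&dma_as, dma_root_mr, "my-device-dma");
 *     ...
 *     address_space_destroy(&dma_as);
 *
 * where dma_root_mr is a hypothetical root MemoryRegion owned by the
 * device.
 */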
static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}
AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)