/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

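/*
 * Illustrative sketch, not part of the original file: exercising the
 * AddrRange helpers above.  For r1 = [0x1000, +0x3000) and
 * r2 = [0x2000, +0x4000) the ranges intersect, and the intersection
 * is [0x2000, +0x2000).
 */
G_GNUC_UNUSED static void addrrange_example(void)
{
    AddrRange r1 = addrrange_make(int128_make64(0x1000),
                                  int128_make64(0x3000));
    AddrRange r2 = addrrange_make(int128_make64(0x2000),
                                  int128_make64(0x4000));
    AddrRange ix;

    assert(addrrange_intersects(r1, r2));
    ix = addrrange_intersection(r1, r2);
    assert(int128_eq(ix.start, int128_make64(0x2000)));
    assert(int128_eq(ix.size, int128_make64(0x2000)));
}
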
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

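/*
 * Worked example (illustrative, not from the original file): two
 * neighbouring ranges rendered from the same MemoryRegion,
 *
 *   ranges[0] = { .offset_in_region = 0x0,    .addr = [0x1000, +0x1000) }
 *   ranges[1] = { .offset_in_region = 0x1000, .addr = [0x2000, +0x1000) }
 *
 * satisfy can_merge(): they abut at 0x2000, the region offsets are
 * contiguous, and the attributes match, so flatview_simplify()
 * collapses them into a single range [0x1000, +0x2000).
 */
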
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

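/*
 * Worked example (illustrative, not from the original file): an 8-byte
 * read at addr 0x10 from a big-endian device whose ops declare
 * impl.max_access_size = 4 is split into two 4-byte calls:
 *
 *   access_fn(mr, 0x10, value, 4, 32, 0xffffffff, attrs);
 *   access_fn(mr, 0x14, value, 4,  0, 0xffffffff, attrs);
 *
 * so the lower-addressed word lands in the most significant half of
 * *value, as big-endian layout requires.
 */
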
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding an
                         * alias down the way.  This will also let us share
                         * FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

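/*
 * Illustrative sketch, not part of the original file: batching several
 * map changes in one transaction.  Listener callbacks and flatview
 * rebuilds run once, at the outermost commit; "mr" is a hypothetical
 * region owned by the caller.
 */
G_GNUC_UNUSED static void example_batched_update(MemoryRegion *mr)
{
    memory_region_transaction_begin();
    memory_region_set_readonly(mr, true);   /* marks an update pending */
    memory_region_set_enabled(mr, false);   /* still no rebuild ...    */
    memory_region_transaction_commit();     /* ... until this point    */
}
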
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

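/*
 * Example (illustrative, not from the original file):
 * memory_region_escape_name("pci/slot[0]") returns
 * "pci\x2fslot\x5b0\x5d", keeping the QOM path unambiguous.
 */
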
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

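/*
 * Illustrative sketch, not part of the original file: the ops/opaque
 * pattern for a minimal MMIO device.  All "example_*" names are
 * hypothetical.
 */
static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0x42;    /* a real device would decode addr into a register */
}

static void example_dev_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    /* decode addr and update device state here */
}

G_GNUC_UNUSED static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

/* A device model would then call, e.g.:
 *   memory_region_init_io(&s->iomem, OBJECT(s), &example_dev_ops, s,
 *                         "example-dev", 0x1000);
 */
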
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

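/*
 * Illustrative sketch, not part of the original file: aliasing the low
 * 1 MiB of a hypothetical "ram" region into a container at address 0,
 * in the style of the PC machine's low-memory alias.
 */
G_GNUC_UNUSED static void example_alias(MemoryRegion *container,
                                        MemoryRegion *ram)
{
    static MemoryRegion alias;

    memory_region_init_alias(&alias, NULL, "ram-below-1m", ram,
                             0, 0x100000);
    memory_region_add_subregion(container, 0, &alias);
}
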
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

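/*
 * Illustrative sketch, not part of the original file: per the comment
 * above, code that keeps using a region after dropping the BQL (for
 * instance across an asynchronous operation) must hold its own
 * reference for the duration.
 */
G_GNUC_UNUSED static void example_async_use(MemoryRegion *mr)
{
    memory_region_ref(mr);
    /* ... kick off work that will touch mr's RAM or MMIO ... */
    memory_region_unref(mr);    /* once that work has completed */
}
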
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

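/*
 * Illustrative sketch, not part of the original file: how a display
 * device might use VGA dirty logging to repaint only when guest video
 * RAM was touched.  "vram" and "size" are hypothetical.
 */
G_GNUC_UNUSED static void example_vga_update(MemoryRegion *vram, hwaddr size)
{
    memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);

    /* in the display refresh handler: */
    if (memory_region_get_dirty(vram, 0, size, DIRTY_MEMORY_VGA)) {
        memory_region_reset_dirty(vram, 0, size, DIRTY_MEMORY_VGA);
        /* ... redraw the affected area ... */
    }
}
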
1966 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1967 hwaddr size, unsigned client)
1968 {
1969 assert(mr->ram_block);
1970 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1971 size, client);
1972 }
1973
1974 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1975 hwaddr size)
1976 {
1977 assert(mr->ram_block);
1978 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1979 size,
1980 memory_region_get_dirty_log_mask(mr));
1981 }
1982
1983 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1984 {
1985 MemoryListener *listener;
1986 AddressSpace *as;
1987 FlatView *view;
1988 FlatRange *fr;
1989
1990 /* If the same address space has multiple log_sync listeners, we
1991 * visit that address space's FlatView multiple times. But because
1992 * log_sync listeners are rare, it's still cheaper than walking each
1993 * address space once.
1994 */
1995 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1996 if (!listener->log_sync) {
1997 continue;
1998 }
1999 as = listener->address_space;
2000 view = address_space_get_flatview(as);
2001 FOR_EACH_FLAT_RANGE(fr, view) {
2002 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2003 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2004 listener->log_sync(listener, &mrs);
2005 }
2006 }
2007 flatview_unref(view);
2008 }
2009 }
2010
2011 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2012 hwaddr addr,
2013 hwaddr size,
2014 unsigned client)
2015 {
2016 assert(mr->ram_block);
2017 memory_region_sync_dirty_bitmap(mr);
2018 return cpu_physical_memory_snapshot_and_clear_dirty(
2019 memory_region_get_ram_addr(mr) + addr, size, client);
2020 }
2021
2022 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2023 hwaddr addr, hwaddr size)
2024 {
2025 assert(mr->ram_block);
2026 return cpu_physical_memory_snapshot_get_dirty(snap,
2027 memory_region_get_ram_addr(mr) + addr, size);
2028 }
2029
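/*
 * Illustrative sketch, not part of memory.c: how a display device might
 * combine memory_region_set_log() with the snapshot calls above.  The
 * ExampleDisplayState fields and example_redraw_scanline() are assumptions.
 */
#if 0
static void example_display_refresh(ExampleDisplayState *s)
{
    DirtyBitmapSnapshot *snap;
    int y;

    /* VGA logging was enabled once at init:
     * memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA); */
    snap = memory_region_snapshot_and_clear_dirty(&s->vram, 0,
                                                  s->vram_size,
                                                  DIRTY_MEMORY_VGA);
    for (y = 0; y < s->height; y++) {
        if (memory_region_snapshot_get_dirty(&s->vram, snap,
                                             y * s->stride, s->stride)) {
            example_redraw_scanline(s, y);
        }
    }
    g_free(snap);
}
#endif
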
2030 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2031 {
2032 if (mr->readonly != readonly) {
2033 memory_region_transaction_begin();
2034 mr->readonly = readonly;
2035 memory_region_update_pending |= mr->enabled;
2036 memory_region_transaction_commit();
2037 }
2038 }
2039
2040 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2041 {
2042 if (mr->romd_mode != romd_mode) {
2043 memory_region_transaction_begin();
2044 mr->romd_mode = romd_mode;
2045 memory_region_update_pending |= mr->enabled;
2046 memory_region_transaction_commit();
2047 }
2048 }
2049
2050 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2051 hwaddr size, unsigned client)
2052 {
2053 assert(mr->ram_block);
2054 cpu_physical_memory_test_and_clear_dirty(
2055 memory_region_get_ram_addr(mr) + addr, size, client);
2056 }
2057
2058 int memory_region_get_fd(MemoryRegion *mr)
2059 {
2060 int fd;
2061
2062 rcu_read_lock();
2063 while (mr->alias) {
2064 mr = mr->alias;
2065 }
2066 fd = mr->ram_block->fd;
2067 rcu_read_unlock();
2068
2069 return fd;
2070 }
2071
2072 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2073 {
2074 void *ptr;
2075 uint64_t offset = 0;
2076
2077 rcu_read_lock();
2078 while (mr->alias) {
2079 offset += mr->alias_offset;
2080 mr = mr->alias;
2081 }
2082 assert(mr->ram_block);
2083 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2084 rcu_read_unlock();
2085
2086 return ptr;
2087 }
2088
2089 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2090 {
2091 RAMBlock *block;
2092
2093 block = qemu_ram_block_from_host(ptr, false, offset);
2094 if (!block) {
2095 return NULL;
2096 }
2097
2098 return block->mr;
2099 }
2100
2101 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2102 {
2103 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2104 }
2105
2106 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2107 {
2108 assert(mr->ram_block);
2109
2110 qemu_ram_resize(mr->ram_block, newsize, errp);
2111 }
2112
2113 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2114 {
2115 FlatView *view;
2116 FlatRange *fr;
2117 CoalescedMemoryRange *cmr;
2118 AddrRange tmp;
2119 MemoryRegionSection section;
2120
2121 view = address_space_get_flatview(as);
2122 FOR_EACH_FLAT_RANGE(fr, view) {
2123 if (fr->mr == mr) {
2124 section = (MemoryRegionSection) {
2125 .fv = view,
2126 .offset_within_address_space = int128_get64(fr->addr.start),
2127 .size = fr->addr.size,
2128 };
2129
2130 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
2131 int128_get64(fr->addr.start),
2132 int128_get64(fr->addr.size));
2133 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2134 tmp = addrrange_shift(cmr->addr,
2135 int128_sub(fr->addr.start,
2136 int128_make64(fr->offset_in_region)));
2137 if (!addrrange_intersects(tmp, fr->addr)) {
2138 continue;
2139 }
2140 tmp = addrrange_intersection(tmp, fr->addr);
2141 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
2142 int128_get64(tmp.start),
2143 int128_get64(tmp.size));
2144 }
2145 }
2146 }
2147 flatview_unref(view);
2148 }
2149
2150 static void memory_region_update_coalesced_range(MemoryRegion *mr)
2151 {
2152 AddressSpace *as;
2153
2154 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2155 memory_region_update_coalesced_range_as(mr, as);
2156 }
2157 }
2158
2159 void memory_region_set_coalescing(MemoryRegion *mr)
2160 {
2161 memory_region_clear_coalescing(mr);
2162 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2163 }
2164
2165 void memory_region_add_coalescing(MemoryRegion *mr,
2166 hwaddr offset,
2167 uint64_t size)
2168 {
2169 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2170
2171 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2172 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2173 memory_region_update_coalesced_range(mr);
2174 memory_region_set_flush_coalesced(mr);
2175 }
2176
2177 void memory_region_clear_coalescing(MemoryRegion *mr)
2178 {
2179 CoalescedMemoryRange *cmr;
2180 bool updated = false;
2181
2182 qemu_flush_coalesced_mmio_buffer();
2183 mr->flush_coalesced_mmio = false;
2184
2185 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2186 cmr = QTAILQ_FIRST(&mr->coalesced);
2187 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2188 g_free(cmr);
2189 updated = true;
2190 }
2191
2192 if (updated) {
2193 memory_region_update_coalesced_range(mr);
2194 }
2195 }
2196
2197 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2198 {
2199 mr->flush_coalesced_mmio = true;
2200 }
2201
2202 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2203 {
2204 qemu_flush_coalesced_mmio_buffer();
2205 if (QTAILQ_EMPTY(&mr->coalesced)) {
2206 mr->flush_coalesced_mmio = false;
2207 }
2208 }
2209
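/*
 * Illustrative sketch, not part of memory.c: a device marking part of its
 * MMIO window as coalesced, in the style of e1000.  ExampleNICState and
 * its 'mmio' field are assumptions.
 */
#if 0
static void example_enable_write_batching(ExampleNICState *s)
{
    /* Writes to this 4K window are buffered and flushed in batches. */
    memory_region_add_coalescing(&s->mmio, 0x1000, 0x1000);
}

static void example_reset(ExampleNICState *s)
{
    memory_region_clear_coalescing(&s->mmio);
}
#endif
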
2210 void memory_region_clear_global_locking(MemoryRegion *mr)
2211 {
2212 mr->global_locking = false;
2213 }
2214
2215 static bool userspace_eventfd_warning;
2216
2217 void memory_region_add_eventfd(MemoryRegion *mr,
2218 hwaddr addr,
2219 unsigned size,
2220 bool match_data,
2221 uint64_t data,
2222 EventNotifier *e)
2223 {
2224 MemoryRegionIoeventfd mrfd = {
2225 .addr.start = int128_make64(addr),
2226 .addr.size = int128_make64(size),
2227 .match_data = match_data,
2228 .data = data,
2229 .e = e,
2230 };
2231 unsigned i;
2232
2233 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2234 userspace_eventfd_warning))) {
2235 userspace_eventfd_warning = true;
2236 error_report("Using eventfd without MMIO binding in KVM. "
2237 "Suboptimal performance expected");
2238 }
2239
2240 if (size) {
2241 adjust_endianness(mr, &mrfd.data, size);
2242 }
2243 memory_region_transaction_begin();
2244 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2245 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2246 break;
2247 }
2248 }
2249 ++mr->ioeventfd_nb;
2250 mr->ioeventfds = g_realloc(mr->ioeventfds,
2251 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2252 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2253 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2254 mr->ioeventfds[i] = mrfd;
2255 ioeventfd_update_pending |= mr->enabled;
2256 memory_region_transaction_commit();
2257 }
2258
2259 void memory_region_del_eventfd(MemoryRegion *mr,
2260 hwaddr addr,
2261 unsigned size,
2262 bool match_data,
2263 uint64_t data,
2264 EventNotifier *e)
2265 {
2266 MemoryRegionIoeventfd mrfd = {
2267 .addr.start = int128_make64(addr),
2268 .addr.size = int128_make64(size),
2269 .match_data = match_data,
2270 .data = data,
2271 .e = e,
2272 };
2273 unsigned i;
2274
2275 if (size) {
2276 adjust_endianness(mr, &mrfd.data, size);
2277 }
2278 memory_region_transaction_begin();
2279 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2280 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2281 break;
2282 }
2283 }
2284 assert(i != mr->ioeventfd_nb);
2285 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2286 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2287 --mr->ioeventfd_nb;
2288 mr->ioeventfds = g_realloc(mr->ioeventfds,
2289 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
2290 ioeventfd_update_pending |= mr->enabled;
2291 memory_region_transaction_commit();
2292 }
2293
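/*
 * Illustrative sketch, not part of memory.c: virtio-style use of the
 * eventfd calls above, turning guest "kick" writes into eventfd signals.
 * The "example_" helpers and parameters are assumptions.
 */
#if 0
static void example_start_kick_eventfd(MemoryRegion *mr, hwaddr notify_off,
                                       uint16_t vq_idx, EventNotifier *e)
{
    event_notifier_init(e, 0);
    /* 2-byte writes of vq_idx at notify_off now signal 'e' directly. */
    memory_region_add_eventfd(mr, notify_off, 2, true, vq_idx, e);
}

static void example_stop_kick_eventfd(MemoryRegion *mr, hwaddr notify_off,
                                      uint16_t vq_idx, EventNotifier *e)
{
    memory_region_del_eventfd(mr, notify_off, 2, true, vq_idx, e);
    event_notifier_cleanup(e);
}
#endif
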
2294 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2295 {
2296 MemoryRegion *mr = subregion->container;
2297 MemoryRegion *other;
2298
2299 memory_region_transaction_begin();
2300
2301 memory_region_ref(subregion);
2302 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2303 if (subregion->priority >= other->priority) {
2304 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2305 goto done;
2306 }
2307 }
2308 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2309 done:
2310 memory_region_update_pending |= mr->enabled && subregion->enabled;
2311 memory_region_transaction_commit();
2312 }
2313
2314 static void memory_region_add_subregion_common(MemoryRegion *mr,
2315 hwaddr offset,
2316 MemoryRegion *subregion)
2317 {
2318 assert(!subregion->container);
2319 subregion->container = mr;
2320 subregion->addr = offset;
2321 memory_region_update_container_subregions(subregion);
2322 }
2323
2324 void memory_region_add_subregion(MemoryRegion *mr,
2325 hwaddr offset,
2326 MemoryRegion *subregion)
2327 {
2328 subregion->priority = 0;
2329 memory_region_add_subregion_common(mr, offset, subregion);
2330 }
2331
2332 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2333 hwaddr offset,
2334 MemoryRegion *subregion,
2335 int priority)
2336 {
2337 subregion->priority = priority;
2338 memory_region_add_subregion_common(mr, offset, subregion);
2339 }
2340
2341 void memory_region_del_subregion(MemoryRegion *mr,
2342 MemoryRegion *subregion)
2343 {
2344 memory_region_transaction_begin();
2345 assert(subregion->container == mr);
2346 subregion->container = NULL;
2347 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2348 memory_region_unref(subregion);
2349 memory_region_update_pending |= mr->enabled && subregion->enabled;
2350 memory_region_transaction_commit();
2351 }
2352
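/*
 * Illustrative sketch, not part of memory.c: composing a hierarchy with
 * the calls above.  Region names are assumptions; the point is that a
 * higher priority wins where siblings overlap.
 */
#if 0
static void example_build_tree(MemoryRegion *system_memory,
                               MemoryRegion *machine_ram,
                               MemoryRegion *mmio_window)
{
    memory_region_add_subregion(system_memory, 0, machine_ram);
    /* Priority 1 beats the default 0, shadowing the RAM underneath. */
    memory_region_add_subregion_overlap(system_memory, 0xfee00000,
                                        mmio_window, 1);
}
#endif
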
2353 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2354 {
2355 if (enabled == mr->enabled) {
2356 return;
2357 }
2358 memory_region_transaction_begin();
2359 mr->enabled = enabled;
2360 memory_region_update_pending = true;
2361 memory_region_transaction_commit();
2362 }
2363
2364 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2365 {
2366 Int128 s = int128_make64(size);
2367
2368 if (size == UINT64_MAX) {
2369 s = int128_2_64();
2370 }
2371 if (int128_eq(s, mr->size)) {
2372 return;
2373 }
2374 memory_region_transaction_begin();
2375 mr->size = s;
2376 memory_region_update_pending = true;
2377 memory_region_transaction_commit();
2378 }
2379
2380 static void memory_region_readd_subregion(MemoryRegion *mr)
2381 {
2382 MemoryRegion *container = mr->container;
2383
2384 if (container) {
2385 memory_region_transaction_begin();
2386 memory_region_ref(mr);
2387 memory_region_del_subregion(container, mr);
2388 mr->container = container;
2389 memory_region_update_container_subregions(mr);
2390 memory_region_unref(mr);
2391 memory_region_transaction_commit();
2392 }
2393 }
2394
2395 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2396 {
2397 if (addr != mr->addr) {
2398 mr->addr = addr;
2399 memory_region_readd_subregion(mr);
2400 }
2401 }
2402
2403 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2404 {
2405 assert(mr->alias);
2406
2407 if (offset == mr->alias_offset) {
2408 return;
2409 }
2410
2411 memory_region_transaction_begin();
2412 mr->alias_offset = offset;
2413 memory_region_update_pending |= mr->enabled;
2414 memory_region_transaction_commit();
2415 }
2416
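/*
 * Illustrative sketch, not part of memory.c: remapping a live region,
 * e.g. when the guest reprograms a PCI BAR.  Batching both updates in
 * one transaction lets listeners see a single topology change.
 */
#if 0
static void example_update_bar(MemoryRegion *bar_mr, hwaddr new_base,
                               bool enabled)
{
    memory_region_transaction_begin();
    memory_region_set_address(bar_mr, new_base);
    memory_region_set_enabled(bar_mr, enabled);
    memory_region_transaction_commit();
}
#endif
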
2417 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2418 {
2419 return mr->align;
2420 }
2421
2422 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2423 {
2424 const AddrRange *addr = addr_;
2425 const FlatRange *fr = fr_;
2426
2427 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2428 return -1;
2429 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2430 return 1;
2431 }
2432 return 0;
2433 }
2434
2435 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2436 {
2437 return bsearch(&addr, view->ranges, view->nr,
2438 sizeof(FlatRange), cmp_flatrange_addr);
2439 }
2440
2441 bool memory_region_is_mapped(MemoryRegion *mr)
2442 {
2443 return mr->container ? true : false;
2444 }
2445
2446 /* Same as memory_region_find, but it does not add a reference to the
2447 * returned region. It must be called from an RCU critical section.
2448 */
2449 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2450 hwaddr addr, uint64_t size)
2451 {
2452 MemoryRegionSection ret = { .mr = NULL };
2453 MemoryRegion *root;
2454 AddressSpace *as;
2455 AddrRange range;
2456 FlatView *view;
2457 FlatRange *fr;
2458
2459 addr += mr->addr;
2460 for (root = mr; root->container; ) {
2461 root = root->container;
2462 addr += root->addr;
2463 }
2464
2465 as = memory_region_to_address_space(root);
2466 if (!as) {
2467 return ret;
2468 }
2469 range = addrrange_make(int128_make64(addr), int128_make64(size));
2470
2471 view = address_space_to_flatview(as);
2472 fr = flatview_lookup(view, range);
2473 if (!fr) {
2474 return ret;
2475 }
2476
2477 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2478 --fr;
2479 }
2480
2481 ret.mr = fr->mr;
2482 ret.fv = view;
2483 range = addrrange_intersection(range, fr->addr);
2484 ret.offset_within_region = fr->offset_in_region;
2485 ret.offset_within_region += int128_get64(int128_sub(range.start,
2486 fr->addr.start));
2487 ret.size = range.size;
2488 ret.offset_within_address_space = int128_get64(range.start);
2489 ret.readonly = fr->readonly;
2490 return ret;
2491 }
2492
2493 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2494 hwaddr addr, uint64_t size)
2495 {
2496 MemoryRegionSection ret;
2497 rcu_read_lock();
2498 ret = memory_region_find_rcu(mr, addr, size);
2499 if (ret.mr) {
2500 memory_region_ref(ret.mr);
2501 }
2502 rcu_read_unlock();
2503 return ret;
2504 }
2505
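/*
 * Illustrative sketch, not part of memory.c: memory_region_find() hands
 * back a referenced region, so callers must unref it when done.
 */
#if 0
static void example_probe(MemoryRegion *container, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(container, addr, 4);

    if (sec.mr) {
        /* ... use sec.offset_within_region, sec.size, sec.readonly ... */
        memory_region_unref(sec.mr);
    }
}
#endif
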
2506 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2507 {
2508 MemoryRegion *mr;
2509
2510 rcu_read_lock();
2511 mr = memory_region_find_rcu(container, addr, 1).mr;
2512 rcu_read_unlock();
2513 return mr && mr != container;
2514 }
2515
2516 void memory_global_dirty_log_sync(void)
2517 {
2518 memory_region_sync_dirty_bitmap(NULL);
2519 }
2520
2521 static VMChangeStateEntry *vmstate_change;
2522
2523 void memory_global_dirty_log_start(void)
2524 {
2525 if (vmstate_change) {
2526 qemu_del_vm_change_state_handler(vmstate_change);
2527 vmstate_change = NULL;
2528 }
2529
2530 global_dirty_log = true;
2531
2532 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2533
2534 /* Refresh DIRTY_LOG_MIGRATION bit. */
2535 memory_region_transaction_begin();
2536 memory_region_update_pending = true;
2537 memory_region_transaction_commit();
2538 }
2539
2540 static void memory_global_dirty_log_do_stop(void)
2541 {
2542 global_dirty_log = false;
2543
2544 /* Refresh DIRTY_LOG_MIGRATION bit. */
2545 memory_region_transaction_begin();
2546 memory_region_update_pending = true;
2547 memory_region_transaction_commit();
2548
2549 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2550 }
2551
2552 static void memory_vm_change_state_handler(void *opaque, int running,
2553 RunState state)
2554 {
2555 if (running) {
2556 memory_global_dirty_log_do_stop();
2557
2558 if (vmstate_change) {
2559 qemu_del_vm_change_state_handler(vmstate_change);
2560 vmstate_change = NULL;
2561 }
2562 }
2563 }
2564
2565 void memory_global_dirty_log_stop(void)
2566 {
2567 if (!runstate_is_running()) {
2568 if (vmstate_change) {
2569 return;
2570 }
2571 vmstate_change = qemu_add_vm_change_state_handler(
2572 memory_vm_change_state_handler, NULL);
2573 return;
2574 }
2575
2576 memory_global_dirty_log_do_stop();
2577 }
2578
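/*
 * Illustrative sketch, not part of memory.c: the migration-style pattern
 * the three global dirty-log entry points are built for.  The loop and
 * example_more_dirty_pages() are assumptions that gloss over the real
 * migration machinery.
 */
#if 0
static void example_migration_pass(void)
{
    memory_global_dirty_log_start();
    while (example_more_dirty_pages()) {
        memory_global_dirty_log_sync();
        /* ... walk the dirty bitmaps and transmit dirty pages ... */
    }
    memory_global_dirty_log_stop();
}
#endif
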
2579 static void listener_add_address_space(MemoryListener *listener,
2580 AddressSpace *as)
2581 {
2582 FlatView *view;
2583 FlatRange *fr;
2584
2585 if (listener->begin) {
2586 listener->begin(listener);
2587 }
2588 if (global_dirty_log) {
2589 if (listener->log_global_start) {
2590 listener->log_global_start(listener);
2591 }
2592 }
2593
2594 view = address_space_get_flatview(as);
2595 FOR_EACH_FLAT_RANGE(fr, view) {
2596 MemoryRegionSection section = section_from_flat_range(fr, view);
2597
2598 if (listener->region_add) {
2599 listener->region_add(listener, &section);
2600 }
2601 if (fr->dirty_log_mask && listener->log_start) {
2602 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2603 }
2604 }
2605 if (listener->commit) {
2606 listener->commit(listener);
2607 }
2608 flatview_unref(view);
2609 }
2610
2611 static void listener_del_address_space(MemoryListener *listener,
2612 AddressSpace *as)
2613 {
2614 FlatView *view;
2615 FlatRange *fr;
2616
2617 if (listener->begin) {
2618 listener->begin(listener);
2619 }
2620 view = address_space_get_flatview(as);
2621 FOR_EACH_FLAT_RANGE(fr, view) {
2622 MemoryRegionSection section = section_from_flat_range(fr, view);
2623
2624 if (fr->dirty_log_mask && listener->log_stop) {
2625 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2626 }
2627 if (listener->region_del) {
2628 listener->region_del(listener, &section);
2629 }
2630 }
2631 if (listener->commit) {
2632 listener->commit(listener);
2633 }
2634 flatview_unref(view);
2635 }
2636
2637 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2638 {
2639 MemoryListener *other = NULL;
2640
2641 listener->address_space = as;
2642 if (QTAILQ_EMPTY(&memory_listeners)
2643 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2644 memory_listeners)->priority) {
2645 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2646 } else {
2647 QTAILQ_FOREACH(other, &memory_listeners, link) {
2648 if (listener->priority < other->priority) {
2649 break;
2650 }
2651 }
2652 QTAILQ_INSERT_BEFORE(other, listener, link);
2653 }
2654
2655 if (QTAILQ_EMPTY(&as->listeners)
2656 || listener->priority >= QTAILQ_LAST(&as->listeners,
2657 memory_listeners)->priority) {
2658 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2659 } else {
2660 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2661 if (listener->priority < other->priority) {
2662 break;
2663 }
2664 }
2665 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2666 }
2667
2668 listener_add_address_space(listener, as);
2669 }
2670
2671 void memory_listener_unregister(MemoryListener *listener)
2672 {
2673 if (!listener->address_space) {
2674 return;
2675 }
2676
2677 listener_del_address_space(listener, listener->address_space);
2678 QTAILQ_REMOVE(&memory_listeners, listener, link);
2679 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2680 listener->address_space = NULL;
2681 }
2682
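/*
 * Illustrative sketch, not part of memory.c: a minimal listener.
 * Callbacks left NULL are simply skipped by the dispatch code above;
 * registration replays the current topology through region_add.
 */
#if 0
static void example_region_add(MemoryListener *l, MemoryRegionSection *s)
{
    fprintf(stderr, "mapped %s\n", memory_region_name(s->mr));
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

/* memory_listener_register(&example_listener, &address_space_memory); */
#endif
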
2683 bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2684 {
2685 void *host;
2686 unsigned size = 0;
2687 unsigned offset = 0;
2688 Object *new_interface;
2689
2690 if (!mr || !mr->ops->request_ptr) {
2691 return false;
2692 }
2693
2694 /*
2695 * Avoid an update if the request_ptr callback calls
2696 * memory_region_invalidate_mmio_ptr(), which seems likely when we
2697 * use a cache.
2698 */
2699 memory_region_transaction_begin();
2700
2701 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2702
2703 if (!host || !size) {
2704 memory_region_transaction_commit();
2705 return false;
2706 }
2707
2708 new_interface = object_new("mmio_interface");
2709 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2710 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2711 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2712 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2713 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2714 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2715
2716 memory_region_transaction_commit();
2717 return true;
2718 }
2719
2720 typedef struct MMIOPtrInvalidate {
2721 MemoryRegion *mr;
2722 hwaddr offset;
2723 unsigned size;
2724 int busy;
2725 int allocated;
2726 } MMIOPtrInvalidate;
2727
2728 #define MAX_MMIO_INVALIDATE 10
2729 static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2730
2731 static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2732 run_on_cpu_data data)
2733 {
2734 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2735 MemoryRegion *mr = invalidate_data->mr;
2736 hwaddr offset = invalidate_data->offset;
2737 unsigned size = invalidate_data->size;
2738 MemoryRegionSection section = memory_region_find(mr, offset, size);
2739
2740 qemu_mutex_lock_iothread();
2741
2742 /* Clear the dirty flag so this invalidation doesn't retrigger later. */
2743 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2744
2745 if (section.mr != mr) {
2746 /* memory_region_find() adds a ref on section.mr */
2747 memory_region_unref(section.mr);
2748 if (MMIO_INTERFACE(section.mr->owner)) {
2749 /* We found the interface; just drop it. */
2750 object_property_set_bool(section.mr->owner, false, "realized",
2751 NULL);
2752 object_unref(section.mr->owner);
2753 object_unparent(section.mr->owner);
2754 }
2755 }
2756
2757 qemu_mutex_unlock_iothread();
2758
2759 if (invalidate_data->allocated) {
2760 g_free(invalidate_data);
2761 } else {
2762 invalidate_data->busy = 0;
2763 }
2764 }
2765
2766 void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2767 unsigned size)
2768 {
2769 size_t i;
2770 MMIOPtrInvalidate *invalidate_data = NULL;
2771
2772 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2773 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2774 invalidate_data = &mmio_ptr_invalidate_list[i];
2775 break;
2776 }
2777 }
2778
2779 if (!invalidate_data) {
2780 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2781 invalidate_data->allocated = 1;
2782 }
2783
2784 invalidate_data->mr = mr;
2785 invalidate_data->offset = offset;
2786 invalidate_data->size = size;
2787
2788 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2789 RUN_ON_CPU_HOST_PTR(invalidate_data));
2790 }
2791
2792 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
2793 {
2794 memory_region_ref(root);
2795 as->root = root;
2796 as->current_map = NULL;
2797 as->ioeventfd_nb = 0;
2798 as->ioeventfds = NULL;
2799 QTAILQ_INIT(&as->listeners);
2800 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2801 as->name = g_strdup(name ? name : "anonymous");
2802 address_space_update_topology(as);
2803 address_space_update_ioeventfds(as);
2804 }
2805
2806 static void do_address_space_destroy(AddressSpace *as)
2807 {
2808 assert(QTAILQ_EMPTY(&as->listeners));
2809
2810 flatview_unref(as->current_map);
2811 g_free(as->name);
2812 g_free(as->ioeventfds);
2813 memory_region_unref(as->root);
2814 }
2815
2816 void address_space_destroy(AddressSpace *as)
2817 {
2818 MemoryRegion *root = as->root;
2819
2820 /* Flush out anything from MemoryListeners listening in on this */
2821 memory_region_transaction_begin();
2822 as->root = NULL;
2823 memory_region_transaction_commit();
2824 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2825
2826 /* At this point, as->dispatch and as->current_map are dummy
2827 * entries that the guest should never use. Wait for the old
2828 * values to expire before freeing the data.
2829 */
2830 as->root = root;
2831 call_rcu(as, do_address_space_destroy, rcu);
2832 }
2833
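/*
 * Illustrative sketch, not part of memory.c: the usual lifecycle of a
 * device-private address space, e.g. a per-device DMA view.  The
 * "example_" names are assumptions.
 */
#if 0
static AddressSpace example_as;

static void example_device_init(MemoryRegion *device_root)
{
    address_space_init(&example_as, device_root, "example-dma");
}

static void example_device_fini(void)
{
    /* Unregisters the root and frees the FlatView via RCU. */
    address_space_destroy(&example_as);
}
#endif
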
2834 static const char *memory_region_type(MemoryRegion *mr)
2835 {
2836 if (memory_region_is_ram_device(mr)) {
2837 return "ramd";
2838 } else if (memory_region_is_romd(mr)) {
2839 return "romd";
2840 } else if (memory_region_is_rom(mr)) {
2841 return "rom";
2842 } else if (memory_region_is_ram(mr)) {
2843 return "ram";
2844 } else {
2845 return "i/o";
2846 }
2847 }
2848
2849 typedef struct MemoryRegionList MemoryRegionList;
2850
2851 struct MemoryRegionList {
2852 const MemoryRegion *mr;
2853 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
2854 };
2855
2856 typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
2857
2858 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2859 int128_sub((size), int128_one())) : 0)
2860 #define MTREE_INDENT " "
2861
2862 static void mtree_expand_owner(fprintf_function mon_printf, void *f,
2863 const char *label, Object *obj)
2864 {
2865 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2866
2867 mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
2868 if (dev && dev->id) {
2869 mon_printf(f, " id=%s", dev->id);
2870 } else {
2871 gchar *canonical_path = object_get_canonical_path(obj);
2872 if (canonical_path) {
2873 mon_printf(f, " path=%s", canonical_path);
2874 g_free(canonical_path);
2875 } else {
2876 mon_printf(f, " type=%s", object_get_typename(obj));
2877 }
2878 }
2879 mon_printf(f, "}");
2880 }
2881
2882 static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
2883 const MemoryRegion *mr)
2884 {
2885 Object *owner = mr->owner;
2886 Object *parent = memory_region_owner((MemoryRegion *)mr);
2887
2888 if (!owner && !parent) {
2889 mon_printf(f, " orphan");
2890 return;
2891 }
2892 if (owner) {
2893 mtree_expand_owner(mon_printf, f, "owner", owner);
2894 }
2895 if (parent && parent != owner) {
2896 mtree_expand_owner(mon_printf, f, "parent", parent);
2897 }
2898 }
2899
2900 static void mtree_print_mr(fprintf_function mon_printf, void *f,
2901 const MemoryRegion *mr, unsigned int level,
2902 hwaddr base,
2903 MemoryRegionListHead *alias_print_queue,
2904 bool owner)
2905 {
2906 MemoryRegionList *new_ml, *ml, *next_ml;
2907 MemoryRegionListHead submr_print_queue;
2908 const MemoryRegion *submr;
2909 unsigned int i;
2910 hwaddr cur_start, cur_end;
2911
2912 if (!mr) {
2913 return;
2914 }
2915
2916 for (i = 0; i < level; i++) {
2917 mon_printf(f, MTREE_INDENT);
2918 }
2919
2920 cur_start = base + mr->addr;
2921 cur_end = cur_start + MR_SIZE(mr->size);
2922
2923 /*
2924 * Try to detect overflow of the memory region.  This should never
2925 * happen normally; when it does, print a marker to warn whoever is
2926 * reading the output.
2927 */
2928 if (cur_start < base || cur_end < cur_start) {
2929 mon_printf(f, "[DETECTED OVERFLOW!] ");
2930 }
2931
2932 if (mr->alias) {
2933 MemoryRegionList *ml;
2934 bool found = false;
2935
2936 /* check if the alias is already in the queue */
2937 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
2938 if (ml->mr == mr->alias) {
2939 found = true;
2940 }
2941 }
2942
2943 if (!found) {
2944 ml = g_new(MemoryRegionList, 1);
2945 ml->mr = mr->alias;
2946 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
2947 }
2948 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
2949 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
2950 "-" TARGET_FMT_plx "%s",
2951 cur_start, cur_end,
2952 mr->priority,
2953 memory_region_type((MemoryRegion *)mr),
2954 memory_region_name(mr),
2955 memory_region_name(mr->alias),
2956 mr->alias_offset,
2957 mr->alias_offset + MR_SIZE(mr->size),
2958 mr->enabled ? "" : " [disabled]");
2959 if (owner) {
2960 mtree_print_mr_owner(mon_printf, f, mr);
2961 }
2962 } else {
2963 mon_printf(f,
2964 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s",
2965 cur_start, cur_end,
2966 mr->priority,
2967 memory_region_type((MemoryRegion *)mr),
2968 memory_region_name(mr),
2969 mr->enabled ? "" : " [disabled]");
2970 if (owner) {
2971 mtree_print_mr_owner(mon_printf, f, mr);
2972 }
2973 }
2974 mon_printf(f, "\n");
2975
2976 QTAILQ_INIT(&submr_print_queue);
2977
2978 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2979 new_ml = g_new(MemoryRegionList, 1);
2980 new_ml->mr = submr;
2981 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2982 if (new_ml->mr->addr < ml->mr->addr ||
2983 (new_ml->mr->addr == ml->mr->addr &&
2984 new_ml->mr->priority > ml->mr->priority)) {
2985 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
2986 new_ml = NULL;
2987 break;
2988 }
2989 }
2990 if (new_ml) {
2991 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
2992 }
2993 }
2994
2995 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2996 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
2997 alias_print_queue, owner);
2998 }
2999
3000 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3001 g_free(ml);
3002 }
3003 }
3004
3005 struct FlatViewInfo {
3006 fprintf_function mon_printf;
3007 void *f;
3008 int counter;
3009 bool dispatch_tree;
3010 bool owner;
3011 };
3012
3013 static void mtree_print_flatview(gpointer key, gpointer value,
3014 gpointer user_data)
3015 {
3016 FlatView *view = key;
3017 GArray *fv_address_spaces = value;
3018 struct FlatViewInfo *fvi = user_data;
3019 fprintf_function p = fvi->mon_printf;
3020 void *f = fvi->f;
3021 FlatRange *range = &view->ranges[0];
3022 MemoryRegion *mr;
3023 int n = view->nr;
3024 int i;
3025 AddressSpace *as;
3026
3027 p(f, "FlatView #%d\n", fvi->counter);
3028 ++fvi->counter;
3029
3030 for (i = 0; i < fv_address_spaces->len; ++i) {
3031 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3032 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
3033 if (as->root->alias) {
3034 p(f, ", alias %s", memory_region_name(as->root->alias));
3035 }
3036 p(f, "\n");
3037 }
3038
3039 p(f, " Root memory region: %s\n",
3040 view->root ? memory_region_name(view->root) : "(none)");
3041
3042 if (n <= 0) {
3043 p(f, MTREE_INDENT "No rendered FlatView\n\n");
3044 return;
3045 }
3046
3047 while (n--) {
3048 mr = range->mr;
3049 if (range->offset_in_region) {
3050 p(f, MTREE_INDENT TARGET_FMT_plx "-"
3051 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx,
3052 int128_get64(range->addr.start),
3053 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3054 mr->priority,
3055 range->readonly ? "rom" : memory_region_type(mr),
3056 memory_region_name(mr),
3057 range->offset_in_region);
3058 } else {
3059 p(f, MTREE_INDENT TARGET_FMT_plx "-"
3060 TARGET_FMT_plx " (prio %d, %s): %s",
3061 int128_get64(range->addr.start),
3062 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3063 mr->priority,
3064 range->readonly ? "rom" : memory_region_type(mr),
3065 memory_region_name(mr));
3066 }
3067 if (fvi->owner) {
3068 mtree_print_mr_owner(p, f, mr);
3069 }
3070 p(f, "\n");
3071 range++;
3072 }
3073
3074 #if !defined(CONFIG_USER_ONLY)
3075 if (fvi->dispatch_tree && view->root) {
3076 mtree_print_dispatch(p, f, view->dispatch, view->root);
3077 }
3078 #endif
3079
3080 p(f, "\n");
3081 }
3082
3083 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3084 gpointer user_data)
3085 {
3086 FlatView *view = key;
3087 GArray *fv_address_spaces = value;
3088
3089 g_array_unref(fv_address_spaces);
3090 flatview_unref(view);
3091
3092 return true;
3093 }
3094
3095 void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
3096 bool dispatch_tree, bool owner)
3097 {
3098 MemoryRegionListHead ml_head;
3099 MemoryRegionList *ml, *ml2;
3100 AddressSpace *as;
3101
3102 if (flatview) {
3103 FlatView *view;
3104 struct FlatViewInfo fvi = {
3105 .mon_printf = mon_printf,
3106 .f = f,
3107 .counter = 0,
3108 .dispatch_tree = dispatch_tree,
3109 .owner = owner,
3110 };
3111 GArray *fv_address_spaces;
3112 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3113
3114 /* Gather all FVs in one table */
3115 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3116 view = address_space_get_flatview(as);
3117
3118 fv_address_spaces = g_hash_table_lookup(views, view);
3119 if (!fv_address_spaces) {
3120 fv_address_spaces = g_array_new(false, false, sizeof(as));
3121 g_hash_table_insert(views, view, fv_address_spaces);
3122 }
3123
3124 g_array_append_val(fv_address_spaces, as);
3125 }
3126
3127 /* Print */
3128 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3129
3130 /* Free */
3131 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3132 g_hash_table_unref(views);
3133
3134 return;
3135 }
3136
3137 QTAILQ_INIT(&ml_head);
3138
3139 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3140 mon_printf(f, "address-space: %s\n", as->name);
3141 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
3142 mon_printf(f, "\n");
3143 }
3144
3145 /* print aliased regions */
3146 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3147 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3148 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
3149 mon_printf(f, "\n");
3150 }
3151
3152 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3153 g_free(ml);
3154 }
3155 }
3156
3157 void memory_region_init_ram(MemoryRegion *mr,
3158 struct Object *owner,
3159 const char *name,
3160 uint64_t size,
3161 Error **errp)
3162 {
3163 DeviceState *owner_dev;
3164 Error *err = NULL;
3165
3166 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3167 if (err) {
3168 error_propagate(errp, err);
3169 return;
3170 }
3171 /* This will assert if owner is neither NULL nor a DeviceState.
3172 * We only want the owner here for the purposes of defining a
3173 * unique name for migration. TODO: Ideally we should implement
3174 * a naming scheme for Objects which are not DeviceStates, in
3175 * which case we can relax this restriction.
3176 */
3177 owner_dev = DEVICE(owner);
3178 vmstate_register_ram(mr, owner_dev);
3179 }
3180
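/*
 * Illustrative sketch, not part of memory.c: realize-time use of the
 * helper above.  Passing the device as owner gives the RAM block a
 * stable migration name; ExampleDeviceState and EXAMPLE_DEVICE() are
 * assumptions.
 */
#if 0
static void example_device_realize(DeviceState *dev, Error **errp)
{
    ExampleDeviceState *s = EXAMPLE_DEVICE(dev);

    memory_region_init_ram(&s->ram, OBJECT(dev), "example.ram",
                           s->ram_size, errp);
}
#endif
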
3181 void memory_region_init_rom(MemoryRegion *mr,
3182 struct Object *owner,
3183 const char *name,
3184 uint64_t size,
3185 Error **errp)
3186 {
3187 DeviceState *owner_dev;
3188 Error *err = NULL;
3189
3190 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3191 if (err) {
3192 error_propagate(errp, err);
3193 return;
3194 }
3195 /* This will assert if owner is neither NULL nor a DeviceState.
3196 * We only want the owner here for the purposes of defining a
3197 * unique name for migration. TODO: Ideally we should implement
3198 * a naming scheme for Objects which are not DeviceStates, in
3199 * which case we can relax this restriction.
3200 */
3201 owner_dev = DEVICE(owner);
3202 vmstate_register_ram(mr, owner_dev);
3203 }
3204
3205 void memory_region_init_rom_device(MemoryRegion *mr,
3206 struct Object *owner,
3207 const MemoryRegionOps *ops,
3208 void *opaque,
3209 const char *name,
3210 uint64_t size,
3211 Error **errp)
3212 {
3213 DeviceState *owner_dev;
3214 Error *err = NULL;
3215
3216 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3217 name, size, &err);
3218 if (err) {
3219 error_propagate(errp, err);
3220 return;
3221 }
3222 /* This will assert if owner is neither NULL nor a DeviceState.
3223 * We only want the owner here for the purposes of defining a
3224 * unique name for migration. TODO: Ideally we should implement
3225 * a naming scheme for Objects which are not DeviceStates, in
3226 * which case we can relax this restriction.
3227 */
3228 owner_dev = DEVICE(owner);
3229 vmstate_register_ram(mr, owner_dev);
3230 }
3231
3232 static const TypeInfo memory_region_info = {
3233 .parent = TYPE_OBJECT,
3234 .name = TYPE_MEMORY_REGION,
3235 .instance_size = sizeof(MemoryRegion),
3236 .instance_init = memory_region_initfn,
3237 .instance_finalize = memory_region_finalize,
3238 };
3239
3240 static const TypeInfo iommu_memory_region_info = {
3241 .parent = TYPE_MEMORY_REGION,
3242 .name = TYPE_IOMMU_MEMORY_REGION,
3243 .class_size = sizeof(IOMMUMemoryRegionClass),
3244 .instance_size = sizeof(IOMMUMemoryRegion),
3245 .instance_init = iommu_memory_region_initfn,
3246 .abstract = true,
3247 };
3248
3249 static void memory_register_types(void)
3250 {
3251 type_register_static(&memory_region_info);
3252 type_register_static(&iommu_memory_region_info);
3253 }
3254
3255 type_init(memory_register_types)