/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

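/* Cache of rendered FlatViews, keyed by the effective root MemoryRegion
 * (see memory_region_get_flatview_root()).  Address spaces whose roots
 * resolve to the same region share one FlatView instead of each rendering
 * their own copy.
 */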
static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

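/* Invoke a callback on every registered MemoryListener, walking the list
 * either front-to-back (Forward) or back-to-front (Reverse).  The _GLOBAL
 * variant iterates over all listeners; the per-AddressSpace variant only
 * over listeners attached to that address space, passing the affected
 * MemoryRegionSection along.
 */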
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

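/* Strict weak ordering on ioeventfds: compare by range start, then size,
 * then match_data, then (when match_data is set) the datum, and finally the
 * notifier pointer.  memory_region_ioeventfd_equal() derives equality from
 * this ordering.
 */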
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

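/* FlatViews are reference counted and reclaimed through RCU: the final
 * unref defers flatview_destroy() until all readers are done.  flatview_ref()
 * returns false if the count has already reached zero, so a reader that
 * raced with the last unref knows to fetch the current map again.
 */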
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

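/* Two FlatRanges may be merged when they are adjacent in the address space,
 * map consecutive offsets of the same MemoryRegion, and agree on all
 * attributes (dirty logging, ROM/device mode, read-only).
 */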
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

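/* A region has the "wrong" endianness when the device's declared byte order
 * differs from the target's; DEVICE_NATIVE_ENDIAN never mismatches.  In that
 * case adjust_endianness() byte-swaps values on their way in or out.
 */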
static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

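/* Break an access up into the sizes the device actually implements.  The
 * requested size is clamped to [access_size_min, access_size_max] and the
 * access is performed piecewise, each sub-access contributing its bits to
 * *value under (mask << shift).  For big-endian regions the lowest-addressed
 * sub-access lands in the most significant bits, for little-endian in the
 * least significant bits.
 */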
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->alias && !mr->alias_offset &&
           int128_ge(mr->size, mr->alias->size)) {
        /* The alias is included in its entirety. Use it as
         * the "real" root, so that we can share more FlatViews.
         */
        mr = mr->alias;
    }

    return mr;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
}

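/* Throw away every cached FlatView and render a fresh one per unique
 * flatview root.  Called from the outermost transaction commit when the
 * memory topology has changed.
 */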
static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

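/* Transactions batch topology changes: begin/commit pairs nest, and the
 * flat views, listeners and ioeventfds are only brought up to date when the
 * outermost commit runs with an update pending.
 */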
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

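/* QOM path components cannot contain '/', '[', ']' or '\'; escape any such
 * character in a region name as a four-byte "\xNN" hex sequence.
 */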
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

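/* Accesses that hit no region end up here: reads return zero and writes are
 * discarded, after giving the current CPU a chance to raise an
 * unassigned-access exception.
 */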
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

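/* ram_device regions are backed by a host pointer, but accesses are routed
 * through the accessors above rather than the direct-mapped RAM fast path,
 * so each guest access is performed as a single correctly-sized host load
 * or store.
 */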
static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

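/* Under TCG, RAM starts out with DIRTY_MEMORY_CODE logging enabled so that
 * writes to memory that has been translated into TBs can be detected and
 * the affected translations invalidated.
 */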
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
1651 | mr->enabled = false; | |
1652 | memory_region_transaction_begin(); | |
1653 | while (!QTAILQ_EMPTY(&mr->subregions)) { | |
1654 | MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); | |
1655 | memory_region_del_subregion(mr, subregion); | |
1656 | } | |
1657 | memory_region_transaction_commit(); | |
1658 | ||
1659 | mr->destructor(mr); | |
1660 | memory_region_clear_coalescing(mr); | |
1661 | g_free((char *)mr->name); | |
1662 | g_free(mr->ioeventfds); | |
1663 | } | |
1664 | ||
1665 | Object *memory_region_owner(MemoryRegion *mr) | |
1666 | { | |
1667 | Object *obj = OBJECT(mr); | |
1668 | return obj->parent; | |
1669 | } | |
1670 | ||
1671 | void memory_region_ref(MemoryRegion *mr) | |
1672 | { | |
1673 | /* MMIO callbacks most likely will access data that belongs | |
1674 | * to the owner, hence the need to ref/unref the owner whenever | |
1675 | * the memory region is in use. | |
1676 | * | |
1677 | * The memory region is a child of its owner. As long as the | |
1678 | * owner doesn't call unparent itself on the memory region, | |
1679 | * ref-ing the owner will also keep the memory region alive. | |
1680 | * Memory regions without an owner are supposed to never go away; | |
1681 | * we do not ref/unref them because it slows down DMA sensibly. | |
1682 | */ | |
1683 | if (mr && mr->owner) { | |
1684 | object_ref(mr->owner); | |
1685 | } | |
1686 | } | |
1687 | ||
1688 | void memory_region_unref(MemoryRegion *mr) | |
1689 | { | |
1690 | if (mr && mr->owner) { | |
1691 | object_unref(mr->owner); | |
1692 | } | |
1693 | } | |
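
/*
 * Illustrative sketch (not part of this file): code that keeps using a
 * MemoryRegion across a blocking or long-running operation is expected
 * to hold a reference for the whole time:
 *
 *     memory_region_ref(mr);              // pins mr->owner, and thus mr
 *     ptr = memory_region_get_ram_ptr(mr);
 *     do_slow_io(ptr);                    // hypothetical long-running user
 *     memory_region_unref(mr);            // may drop the last owner ref
 */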
1694 | ||
1695 | uint64_t memory_region_size(MemoryRegion *mr) | |
1696 | { | |
1697 | if (int128_eq(mr->size, int128_2_64())) { | |
1698 | return UINT64_MAX; | |
1699 | } | |
1700 | return int128_get64(mr->size); | |
1701 | } | |
1702 | ||
1703 | const char *memory_region_name(const MemoryRegion *mr) | |
1704 | { | |
1705 | if (!mr->name) { | |
1706 | ((MemoryRegion *)mr)->name = | |
1707 | object_get_canonical_path_component(OBJECT(mr)); | |
1708 | } | |
1709 | return mr->name; | |
1710 | } | |
1711 | ||
1712 | bool memory_region_is_ram_device(MemoryRegion *mr) | |
1713 | { | |
1714 | return mr->ram_device; | |
1715 | } | |
1716 | ||
1717 | uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) | |
1718 | { | |
1719 | uint8_t mask = mr->dirty_log_mask; | |
1720 | if (global_dirty_log && mr->ram_block) { | |
1721 | mask |= (1 << DIRTY_MEMORY_MIGRATION); | |
1722 | } | |
1723 | return mask; | |
1724 | } | |
1725 | ||
1726 | bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) | |
1727 | { | |
1728 | return memory_region_get_dirty_log_mask(mr) & (1 << client); | |
1729 | } | |
1730 | ||
1731 | static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr) | |
1732 | { | |
1733 | IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; | |
1734 | IOMMUNotifier *iommu_notifier; | |
1735 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1736 | ||
1737 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { | |
1738 | flags |= iommu_notifier->notifier_flags; | |
1739 | } | |
1740 | ||
1741 | if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { | |
1742 | imrc->notify_flag_changed(iommu_mr, | |
1743 | iommu_mr->iommu_notify_flags, | |
1744 | flags); | |
1745 | } | |
1746 | ||
1747 | iommu_mr->iommu_notify_flags = flags; | |
1748 | } | |
1749 | ||
1750 | void memory_region_register_iommu_notifier(MemoryRegion *mr, | |
1751 | IOMMUNotifier *n) | |
1752 | { | |
1753 | IOMMUMemoryRegion *iommu_mr; | |
1754 | ||
1755 | if (mr->alias) { | |
1756 | memory_region_register_iommu_notifier(mr->alias, n); | |
1757 | return; | |
1758 | } | |
1759 | ||
1760 | /* We need to register for at least one bitfield */ | |
1761 | iommu_mr = IOMMU_MEMORY_REGION(mr); | |
1762 | assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); | |
1763 | assert(n->start <= n->end); | |
1764 | QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); | |
1765 | memory_region_update_iommu_notify_flags(iommu_mr); | |
1766 | } | |
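
/*
 * Illustrative sketch (not part of this file): a vhost/VFIO-style consumer
 * registers for mapping updates roughly like this; the my_iommu_notify
 * callback is hypothetical:
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         // react to the added/removed translation described by *iotlb
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_iommu_notify,
 *                         IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP,
 *                         0, memory_region_size(mr) - 1);
 *     memory_region_register_iommu_notifier(mr, &n);
 */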
1767 | ||
1768 | uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) | |
1769 | { | |
1770 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1771 | ||
1772 | if (imrc->get_min_page_size) { | |
1773 | return imrc->get_min_page_size(iommu_mr); | |
1774 | } | |
1775 | return TARGET_PAGE_SIZE; | |
1776 | } | |
1777 | ||
1778 | void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) | |
1779 | { | |
1780 | MemoryRegion *mr = MEMORY_REGION(iommu_mr); | |
1781 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); | |
1782 | hwaddr addr, granularity; | |
1783 | IOMMUTLBEntry iotlb; | |
1784 | ||
1785 | /* If the IOMMU has its own replay callback, use it instead */ | |
1786 | if (imrc->replay) { | |
1787 | imrc->replay(iommu_mr, n); | |
1788 | return; | |
1789 | } | |
1790 | ||
1791 | granularity = memory_region_iommu_get_min_page_size(iommu_mr); | |
1792 | ||
1793 | for (addr = 0; addr < memory_region_size(mr); addr += granularity) { | |
1794 | iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE); | |
1795 | if (iotlb.perm != IOMMU_NONE) { | |
1796 | n->notify(n, &iotlb); | |
1797 | } | |
1798 | ||
1799 | /* If (2^64 - MR size) < granularity, addr can wrap around and loop | |
1800 | * forever. This check catches such a wraparound. */ | |
1801 | if ((addr + granularity) < addr) { | |
1802 | break; | |
1803 | } | |
1804 | } | |
1805 | } | |
1806 | ||
1807 | void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr) | |
1808 | { | |
1809 | IOMMUNotifier *notifier; | |
1810 | ||
1811 | IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) { | |
1812 | memory_region_iommu_replay(iommu_mr, notifier); | |
1813 | } | |
1814 | } | |
1815 | ||
1816 | void memory_region_unregister_iommu_notifier(MemoryRegion *mr, | |
1817 | IOMMUNotifier *n) | |
1818 | { | |
1819 | IOMMUMemoryRegion *iommu_mr; | |
1820 | ||
1821 | if (mr->alias) { | |
1822 | memory_region_unregister_iommu_notifier(mr->alias, n); | |
1823 | return; | |
1824 | } | |
1825 | QLIST_REMOVE(n, node); | |
1826 | iommu_mr = IOMMU_MEMORY_REGION(mr); | |
1827 | memory_region_update_iommu_notify_flags(iommu_mr); | |
1828 | } | |
1829 | ||
1830 | void memory_region_notify_one(IOMMUNotifier *notifier, | |
1831 | IOMMUTLBEntry *entry) | |
1832 | { | |
1833 | IOMMUNotifierFlag request_flags; | |
1834 | ||
1835 | /* | |
1836 | * Skip the notification if it does not overlap with the | |
1837 | * registered range; the entry covers [iova, iova + addr_mask]. | |
1838 | */ | |
1839 | if (notifier->start > entry->iova + entry->addr_mask || | |
1840 | notifier->end < entry->iova) { | |
1841 | return; | |
1842 | } | |
1843 | ||
1844 | if (entry->perm & IOMMU_RW) { | |
1845 | request_flags = IOMMU_NOTIFIER_MAP; | |
1846 | } else { | |
1847 | request_flags = IOMMU_NOTIFIER_UNMAP; | |
1848 | } | |
1849 | ||
1850 | if (notifier->notifier_flags & request_flags) { | |
1851 | notifier->notify(notifier, entry); | |
1852 | } | |
1853 | } | |
1854 | ||
1855 | void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, | |
1856 | IOMMUTLBEntry entry) | |
1857 | { | |
1858 | IOMMUNotifier *iommu_notifier; | |
1859 | ||
1860 | assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); | |
1861 | ||
1862 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { | |
1863 | memory_region_notify_one(iommu_notifier, &entry); | |
1864 | } | |
1865 | } | |
1866 | ||
1867 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) | |
1868 | { | |
1869 | uint8_t mask = 1 << client; | |
1870 | uint8_t old_logging; | |
1871 | ||
1872 | assert(client == DIRTY_MEMORY_VGA); | |
1873 | old_logging = mr->vga_logging_count; | |
1874 | mr->vga_logging_count += log ? 1 : -1; | |
1875 | if (!!old_logging == !!mr->vga_logging_count) { | |
1876 | return; | |
1877 | } | |
1878 | ||
1879 | memory_region_transaction_begin(); | |
1880 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); | |
1881 | memory_region_update_pending |= mr->enabled; | |
1882 | memory_region_transaction_commit(); | |
1883 | } | |
1884 | ||
1885 | bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr, | |
1886 | hwaddr size, unsigned client) | |
1887 | { | |
1888 | assert(mr->ram_block); | |
1889 | return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr, | |
1890 | size, client); | |
1891 | } | |
1892 | ||
1893 | void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, | |
1894 | hwaddr size) | |
1895 | { | |
1896 | assert(mr->ram_block); | |
1897 | cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, | |
1898 | size, | |
1899 | memory_region_get_dirty_log_mask(mr)); | |
1900 | } | |
1901 | ||
1902 | bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr, | |
1903 | hwaddr size, unsigned client) | |
1904 | { | |
1905 | assert(mr->ram_block); | |
1906 | return cpu_physical_memory_test_and_clear_dirty( | |
1907 | memory_region_get_ram_addr(mr) + addr, size, client); | |
1908 | } | |
1909 | ||
1910 | DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, | |
1911 | hwaddr addr, | |
1912 | hwaddr size, | |
1913 | unsigned client) | |
1914 | { | |
1915 | assert(mr->ram_block); | |
1916 | return cpu_physical_memory_snapshot_and_clear_dirty( | |
1917 | memory_region_get_ram_addr(mr) + addr, size, client); | |
1918 | } | |
1919 | ||
1920 | bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, | |
1921 | hwaddr addr, hwaddr size) | |
1922 | { | |
1923 | assert(mr->ram_block); | |
1924 | return cpu_physical_memory_snapshot_get_dirty(snap, | |
1925 | memory_region_get_ram_addr(mr) + addr, size); | |
1926 | } | |
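
/*
 * Illustrative sketch (not part of this file): after enabling VGA dirty
 * logging with memory_region_set_log(vram, true, DIRTY_MEMORY_VGA), a
 * display device can redraw only the scanlines the guest touched; vram,
 * height, stride and redraw_scanline() are hypothetical:
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */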
1927 | ||
1928 | void memory_region_sync_dirty_bitmap(MemoryRegion *mr) | |
1929 | { | |
1930 | MemoryListener *listener; | |
1931 | AddressSpace *as; | |
1932 | FlatView *view; | |
1933 | FlatRange *fr; | |
1934 | ||
1935 | /* If the same address space has multiple log_sync listeners, we | |
1936 | * visit that address space's FlatView multiple times. But because | |
1937 | * log_sync listeners are rare, it's still cheaper than walking each | |
1938 | * address space once. | |
1939 | */ | |
1940 | QTAILQ_FOREACH(listener, &memory_listeners, link) { | |
1941 | if (!listener->log_sync) { | |
1942 | continue; | |
1943 | } | |
1944 | as = listener->address_space; | |
1945 | view = address_space_get_flatview(as); | |
1946 | FOR_EACH_FLAT_RANGE(fr, view) { | |
1947 | if (fr->mr == mr) { | |
1948 | MemoryRegionSection mrs = section_from_flat_range(fr, view); | |
1949 | listener->log_sync(listener, &mrs); | |
1950 | } | |
1951 | } | |
1952 | flatview_unref(view); | |
1953 | } | |
1954 | } | |
1955 | ||
1956 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly) | |
1957 | { | |
1958 | if (mr->readonly != readonly) { | |
1959 | memory_region_transaction_begin(); | |
1960 | mr->readonly = readonly; | |
1961 | memory_region_update_pending |= mr->enabled; | |
1962 | memory_region_transaction_commit(); | |
1963 | } | |
1964 | } | |
1965 | ||
1966 | void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) | |
1967 | { | |
1968 | if (mr->romd_mode != romd_mode) { | |
1969 | memory_region_transaction_begin(); | |
1970 | mr->romd_mode = romd_mode; | |
1971 | memory_region_update_pending |= mr->enabled; | |
1972 | memory_region_transaction_commit(); | |
1973 | } | |
1974 | } | |
1975 | ||
1976 | void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, | |
1977 | hwaddr size, unsigned client) | |
1978 | { | |
1979 | assert(mr->ram_block); | |
1980 | cpu_physical_memory_test_and_clear_dirty( | |
1981 | memory_region_get_ram_addr(mr) + addr, size, client); | |
1982 | } | |
1983 | ||
1984 | int memory_region_get_fd(MemoryRegion *mr) | |
1985 | { | |
1986 | int fd; | |
1987 | ||
1988 | rcu_read_lock(); | |
1989 | while (mr->alias) { | |
1990 | mr = mr->alias; | |
1991 | } | |
1992 | fd = mr->ram_block->fd; | |
1993 | rcu_read_unlock(); | |
1994 | ||
1995 | return fd; | |
1996 | } | |
1997 | ||
1998 | void *memory_region_get_ram_ptr(MemoryRegion *mr) | |
1999 | { | |
2000 | void *ptr; | |
2001 | uint64_t offset = 0; | |
2002 | ||
2003 | rcu_read_lock(); | |
2004 | while (mr->alias) { | |
2005 | offset += mr->alias_offset; | |
2006 | mr = mr->alias; | |
2007 | } | |
2008 | assert(mr->ram_block); | |
2009 | ptr = qemu_map_ram_ptr(mr->ram_block, offset); | |
2010 | rcu_read_unlock(); | |
2011 | ||
2012 | return ptr; | |
2013 | } | |
2014 | ||
2015 | MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) | |
2016 | { | |
2017 | RAMBlock *block; | |
2018 | ||
2019 | block = qemu_ram_block_from_host(ptr, false, offset); | |
2020 | if (!block) { | |
2021 | return NULL; | |
2022 | } | |
2023 | ||
2024 | return block->mr; | |
2025 | } | |
2026 | ||
2027 | ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) | |
2028 | { | |
2029 | return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; | |
2030 | } | |
2031 | ||
2032 | void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) | |
2033 | { | |
2034 | assert(mr->ram_block); | |
2035 | ||
2036 | qemu_ram_resize(mr->ram_block, newsize, errp); | |
2037 | } | |
2038 | ||
2039 | static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) | |
2040 | { | |
2041 | FlatView *view; | |
2042 | FlatRange *fr; | |
2043 | CoalescedMemoryRange *cmr; | |
2044 | AddrRange tmp; | |
2045 | MemoryRegionSection section; | |
2046 | ||
2047 | view = address_space_get_flatview(as); | |
2048 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2049 | if (fr->mr == mr) { | |
2050 | section = (MemoryRegionSection) { | |
2051 | .fv = view, | |
2052 | .offset_within_address_space = int128_get64(fr->addr.start), | |
2053 | .size = fr->addr.size, | |
2054 | }; | |
2055 | ||
2056 | MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, §ion, | |
2057 | int128_get64(fr->addr.start), | |
2058 | int128_get64(fr->addr.size)); | |
2059 | QTAILQ_FOREACH(cmr, &mr->coalesced, link) { | |
2060 | tmp = addrrange_shift(cmr->addr, | |
2061 | int128_sub(fr->addr.start, | |
2062 | int128_make64(fr->offset_in_region))); | |
2063 | if (!addrrange_intersects(tmp, fr->addr)) { | |
2064 | continue; | |
2065 | } | |
2066 | tmp = addrrange_intersection(tmp, fr->addr); | |
2067 | MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, §ion, | |
2068 | int128_get64(tmp.start), | |
2069 | int128_get64(tmp.size)); | |
2070 | } | |
2071 | } | |
2072 | } | |
2073 | flatview_unref(view); | |
2074 | } | |
2075 | ||
2076 | static void memory_region_update_coalesced_range(MemoryRegion *mr) | |
2077 | { | |
2078 | AddressSpace *as; | |
2079 | ||
2080 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
2081 | memory_region_update_coalesced_range_as(mr, as); | |
2082 | } | |
2083 | } | |
2084 | ||
2085 | void memory_region_set_coalescing(MemoryRegion *mr) | |
2086 | { | |
2087 | memory_region_clear_coalescing(mr); | |
2088 | memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); | |
2089 | } | |
2090 | ||
2091 | void memory_region_add_coalescing(MemoryRegion *mr, | |
2092 | hwaddr offset, | |
2093 | uint64_t size) | |
2094 | { | |
2095 | CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); | |
2096 | ||
2097 | cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); | |
2098 | QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); | |
2099 | memory_region_update_coalesced_range(mr); | |
2100 | memory_region_set_flush_coalesced(mr); | |
2101 | } | |
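
/*
 * Illustrative sketch (not part of this file): a device whose MMIO writes
 * have no immediate side effects (an e1000-style register window, say) can
 * coalesce them so KVM batches the exits; the region and offsets are
 * hypothetical:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_ops, s,
 *                           "my-dev-mmio", 0x20000);
 *     // Coalesce only the side-effect-free half of the window:
 *     memory_region_add_coalescing(&s->mmio, 0x10000, 0x10000);
 */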
2102 | ||
2103 | void memory_region_clear_coalescing(MemoryRegion *mr) | |
2104 | { | |
2105 | CoalescedMemoryRange *cmr; | |
2106 | bool updated = false; | |
2107 | ||
2108 | qemu_flush_coalesced_mmio_buffer(); | |
2109 | mr->flush_coalesced_mmio = false; | |
2110 | ||
2111 | while (!QTAILQ_EMPTY(&mr->coalesced)) { | |
2112 | cmr = QTAILQ_FIRST(&mr->coalesced); | |
2113 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); | |
2114 | g_free(cmr); | |
2115 | updated = true; | |
2116 | } | |
2117 | ||
2118 | if (updated) { | |
2119 | memory_region_update_coalesced_range(mr); | |
2120 | } | |
2121 | } | |
2122 | ||
2123 | void memory_region_set_flush_coalesced(MemoryRegion *mr) | |
2124 | { | |
2125 | mr->flush_coalesced_mmio = true; | |
2126 | } | |
2127 | ||
2128 | void memory_region_clear_flush_coalesced(MemoryRegion *mr) | |
2129 | { | |
2130 | qemu_flush_coalesced_mmio_buffer(); | |
2131 | if (QTAILQ_EMPTY(&mr->coalesced)) { | |
2132 | mr->flush_coalesced_mmio = false; | |
2133 | } | |
2134 | } | |
2135 | ||
2136 | void memory_region_set_global_locking(MemoryRegion *mr) | |
2137 | { | |
2138 | mr->global_locking = true; | |
2139 | } | |
2140 | ||
2141 | void memory_region_clear_global_locking(MemoryRegion *mr) | |
2142 | { | |
2143 | mr->global_locking = false; | |
2144 | } | |
2145 | ||
2146 | static bool userspace_eventfd_warning; | |
2147 | ||
2148 | void memory_region_add_eventfd(MemoryRegion *mr, | |
2149 | hwaddr addr, | |
2150 | unsigned size, | |
2151 | bool match_data, | |
2152 | uint64_t data, | |
2153 | EventNotifier *e) | |
2154 | { | |
2155 | MemoryRegionIoeventfd mrfd = { | |
2156 | .addr.start = int128_make64(addr), | |
2157 | .addr.size = int128_make64(size), | |
2158 | .match_data = match_data, | |
2159 | .data = data, | |
2160 | .e = e, | |
2161 | }; | |
2162 | unsigned i; | |
2163 | ||
2164 | if (kvm_enabled() && (!(kvm_eventfds_enabled() || | |
2165 | userspace_eventfd_warning))) { | |
2166 | userspace_eventfd_warning = true; | |
2167 | error_report("Using eventfd without MMIO binding in KVM. " | |
2168 | "Suboptimal performance expected"); | |
2169 | } | |
2170 | ||
2171 | if (size) { | |
2172 | adjust_endianness(mr, &mrfd.data, size); | |
2173 | } | |
2174 | memory_region_transaction_begin(); | |
2175 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
2176 | if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { | |
2177 | break; | |
2178 | } | |
2179 | } | |
2180 | ++mr->ioeventfd_nb; | |
2181 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
2182 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); | |
2183 | memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], | |
2184 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); | |
2185 | mr->ioeventfds[i] = mrfd; | |
2186 | ioeventfd_update_pending |= mr->enabled; | |
2187 | memory_region_transaction_commit(); | |
2188 | } | |
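
/*
 * Illustrative sketch (not part of this file): a virtio-style doorbell can
 * be bound to an eventfd so that the guest's write is handled without a
 * full MMIO exit; QUEUE_NOTIFY_OFFSET and queue_index are hypothetical:
 *
 *     EventNotifier notify;
 *
 *     event_notifier_init(&notify, 0);
 *     memory_region_add_eventfd(&s->mmio,
 *                               QUEUE_NOTIFY_OFFSET,
 *                               2,           // a 16-bit write...
 *                               true,        // ...that matches this value
 *                               queue_index,
 *                               &notify);
 *     // ... then poll event_notifier_get_fd(&notify) elsewhere.
 */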
2189 | ||
2190 | void memory_region_del_eventfd(MemoryRegion *mr, | |
2191 | hwaddr addr, | |
2192 | unsigned size, | |
2193 | bool match_data, | |
2194 | uint64_t data, | |
2195 | EventNotifier *e) | |
2196 | { | |
2197 | MemoryRegionIoeventfd mrfd = { | |
2198 | .addr.start = int128_make64(addr), | |
2199 | .addr.size = int128_make64(size), | |
2200 | .match_data = match_data, | |
2201 | .data = data, | |
2202 | .e = e, | |
2203 | }; | |
2204 | unsigned i; | |
2205 | ||
2206 | if (size) { | |
2207 | adjust_endianness(mr, &mrfd.data, size); | |
2208 | } | |
2209 | memory_region_transaction_begin(); | |
2210 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
2211 | if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) { | |
2212 | break; | |
2213 | } | |
2214 | } | |
2215 | assert(i != mr->ioeventfd_nb); | |
2216 | memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], | |
2217 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); | |
2218 | --mr->ioeventfd_nb; | |
2219 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
2220 | sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); | |
2221 | ioeventfd_update_pending |= mr->enabled; | |
2222 | memory_region_transaction_commit(); | |
2223 | } | |
2224 | ||
2225 | static void memory_region_update_container_subregions(MemoryRegion *subregion) | |
2226 | { | |
2227 | MemoryRegion *mr = subregion->container; | |
2228 | MemoryRegion *other; | |
2229 | ||
2230 | memory_region_transaction_begin(); | |
2231 | ||
2232 | memory_region_ref(subregion); | |
2233 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
2234 | if (subregion->priority >= other->priority) { | |
2235 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); | |
2236 | goto done; | |
2237 | } | |
2238 | } | |
2239 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); | |
2240 | done: | |
2241 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
2242 | memory_region_transaction_commit(); | |
2243 | } | |
2244 | ||
2245 | static void memory_region_add_subregion_common(MemoryRegion *mr, | |
2246 | hwaddr offset, | |
2247 | MemoryRegion *subregion) | |
2248 | { | |
2249 | assert(!subregion->container); | |
2250 | subregion->container = mr; | |
2251 | subregion->addr = offset; | |
2252 | memory_region_update_container_subregions(subregion); | |
2253 | } | |
2254 | ||
2255 | void memory_region_add_subregion(MemoryRegion *mr, | |
2256 | hwaddr offset, | |
2257 | MemoryRegion *subregion) | |
2258 | { | |
2259 | subregion->priority = 0; | |
2260 | memory_region_add_subregion_common(mr, offset, subregion); | |
2261 | } | |
2262 | ||
2263 | void memory_region_add_subregion_overlap(MemoryRegion *mr, | |
2264 | hwaddr offset, | |
2265 | MemoryRegion *subregion, | |
2266 | int priority) | |
2267 | { | |
2268 | subregion->priority = priority; | |
2269 | memory_region_add_subregion_common(mr, offset, subregion); | |
2270 | } | |
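
/*
 * Illustrative sketch (not part of this file): priority decides what is
 * visible where two subregions overlap.  Here "ram" fills the container,
 * but the higher-priority "mmio_window" shadows it over its own range:
 *
 *     memory_region_add_subregion(container, 0, ram);           // prio 0
 *     memory_region_add_subregion_overlap(container, 0xe0000,
 *                                         mmio_window, 1);      // wins
 */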
2271 | ||
2272 | void memory_region_del_subregion(MemoryRegion *mr, | |
2273 | MemoryRegion *subregion) | |
2274 | { | |
2275 | memory_region_transaction_begin(); | |
2276 | assert(subregion->container == mr); | |
2277 | subregion->container = NULL; | |
2278 | QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); | |
2279 | memory_region_unref(subregion); | |
2280 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
2281 | memory_region_transaction_commit(); | |
2282 | } | |
2283 | ||
2284 | void memory_region_set_enabled(MemoryRegion *mr, bool enabled) | |
2285 | { | |
2286 | if (enabled == mr->enabled) { | |
2287 | return; | |
2288 | } | |
2289 | memory_region_transaction_begin(); | |
2290 | mr->enabled = enabled; | |
2291 | memory_region_update_pending = true; | |
2292 | memory_region_transaction_commit(); | |
2293 | } | |
2294 | ||
2295 | void memory_region_set_size(MemoryRegion *mr, uint64_t size) | |
2296 | { | |
2297 | Int128 s = int128_make64(size); | |
2298 | ||
2299 | if (size == UINT64_MAX) { | |
2300 | s = int128_2_64(); | |
2301 | } | |
2302 | if (int128_eq(s, mr->size)) { | |
2303 | return; | |
2304 | } | |
2305 | memory_region_transaction_begin(); | |
2306 | mr->size = s; | |
2307 | memory_region_update_pending = true; | |
2308 | memory_region_transaction_commit(); | |
2309 | } | |
2310 | ||
2311 | static void memory_region_readd_subregion(MemoryRegion *mr) | |
2312 | { | |
2313 | MemoryRegion *container = mr->container; | |
2314 | ||
2315 | if (container) { | |
2316 | memory_region_transaction_begin(); | |
2317 | memory_region_ref(mr); | |
2318 | memory_region_del_subregion(container, mr); | |
2319 | mr->container = container; | |
2320 | memory_region_update_container_subregions(mr); | |
2321 | memory_region_unref(mr); | |
2322 | memory_region_transaction_commit(); | |
2323 | } | |
2324 | } | |
2325 | ||
2326 | void memory_region_set_address(MemoryRegion *mr, hwaddr addr) | |
2327 | { | |
2328 | if (addr != mr->addr) { | |
2329 | mr->addr = addr; | |
2330 | memory_region_readd_subregion(mr); | |
2331 | } | |
2332 | } | |
2333 | ||
2334 | void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) | |
2335 | { | |
2336 | assert(mr->alias); | |
2337 | ||
2338 | if (offset == mr->alias_offset) { | |
2339 | return; | |
2340 | } | |
2341 | ||
2342 | memory_region_transaction_begin(); | |
2343 | mr->alias_offset = offset; | |
2344 | memory_region_update_pending |= mr->enabled; | |
2345 | memory_region_transaction_commit(); | |
2346 | } | |
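
/*
 * Illustrative sketch (not part of this file): together with
 * memory_region_init_alias(), this implements bank switching; moving the
 * alias offset re-points a fixed guest window at another bank:
 *
 *     memory_region_init_alias(&s->bank, OBJECT(s), "bank-window",
 *                              &s->rom, 0, 16 * 1024);
 *     memory_region_add_subregion(container, 0x4000, &s->bank);
 *     // Later, on a bank-select register write:
 *     memory_region_set_alias_offset(&s->bank, bank_nr * 16 * 1024);
 */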
2347 | ||
2348 | uint64_t memory_region_get_alignment(const MemoryRegion *mr) | |
2349 | { | |
2350 | return mr->align; | |
2351 | } | |
2352 | ||
2353 | static int cmp_flatrange_addr(const void *addr_, const void *fr_) | |
2354 | { | |
2355 | const AddrRange *addr = addr_; | |
2356 | const FlatRange *fr = fr_; | |
2357 | ||
2358 | if (int128_le(addrrange_end(*addr), fr->addr.start)) { | |
2359 | return -1; | |
2360 | } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { | |
2361 | return 1; | |
2362 | } | |
2363 | return 0; | |
2364 | } | |
2365 | ||
2366 | static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) | |
2367 | { | |
2368 | return bsearch(&addr, view->ranges, view->nr, | |
2369 | sizeof(FlatRange), cmp_flatrange_addr); | |
2370 | } | |
2371 | ||
2372 | bool memory_region_is_mapped(MemoryRegion *mr) | |
2373 | { | |
2374 | return mr->container ? true : false; | |
2375 | } | |
2376 | ||
2377 | /* Same as memory_region_find, but it does not add a reference to the | |
2378 | * returned region. It must be called from an RCU critical section. | |
2379 | */ | |
2380 | static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, | |
2381 | hwaddr addr, uint64_t size) | |
2382 | { | |
2383 | MemoryRegionSection ret = { .mr = NULL }; | |
2384 | MemoryRegion *root; | |
2385 | AddressSpace *as; | |
2386 | AddrRange range; | |
2387 | FlatView *view; | |
2388 | FlatRange *fr; | |
2389 | ||
2390 | addr += mr->addr; | |
2391 | for (root = mr; root->container; ) { | |
2392 | root = root->container; | |
2393 | addr += root->addr; | |
2394 | } | |
2395 | ||
2396 | as = memory_region_to_address_space(root); | |
2397 | if (!as) { | |
2398 | return ret; | |
2399 | } | |
2400 | range = addrrange_make(int128_make64(addr), int128_make64(size)); | |
2401 | ||
2402 | view = address_space_to_flatview(as); | |
2403 | fr = flatview_lookup(view, range); | |
2404 | if (!fr) { | |
2405 | return ret; | |
2406 | } | |
2407 | ||
2408 | while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { | |
2409 | --fr; | |
2410 | } | |
2411 | ||
2412 | ret.mr = fr->mr; | |
2413 | ret.fv = view; | |
2414 | range = addrrange_intersection(range, fr->addr); | |
2415 | ret.offset_within_region = fr->offset_in_region; | |
2416 | ret.offset_within_region += int128_get64(int128_sub(range.start, | |
2417 | fr->addr.start)); | |
2418 | ret.size = range.size; | |
2419 | ret.offset_within_address_space = int128_get64(range.start); | |
2420 | ret.readonly = fr->readonly; | |
2421 | return ret; | |
2422 | } | |
2423 | ||
2424 | MemoryRegionSection memory_region_find(MemoryRegion *mr, | |
2425 | hwaddr addr, uint64_t size) | |
2426 | { | |
2427 | MemoryRegionSection ret; | |
2428 | rcu_read_lock(); | |
2429 | ret = memory_region_find_rcu(mr, addr, size); | |
2430 | if (ret.mr) { | |
2431 | memory_region_ref(ret.mr); | |
2432 | } | |
2433 | rcu_read_unlock(); | |
2434 | return ret; | |
2435 | } | |
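
/*
 * Illustrative sketch (not part of this file): callers of
 * memory_region_find() must drop the reference it takes on the result:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *
 *     if (sec.mr) {
 *         // use sec.offset_within_region / sec.offset_within_address_space
 *         memory_region_unref(sec.mr);
 *     }
 */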
2436 | ||
2437 | bool memory_region_present(MemoryRegion *container, hwaddr addr) | |
2438 | { | |
2439 | MemoryRegion *mr; | |
2440 | ||
2441 | rcu_read_lock(); | |
2442 | mr = memory_region_find_rcu(container, addr, 1).mr; | |
2443 | rcu_read_unlock(); | |
2444 | return mr && mr != container; | |
2445 | } | |
2446 | ||
2447 | void memory_global_dirty_log_sync(void) | |
2448 | { | |
2449 | MemoryListener *listener; | |
2450 | AddressSpace *as; | |
2451 | FlatView *view; | |
2452 | FlatRange *fr; | |
2453 | ||
2454 | QTAILQ_FOREACH(listener, &memory_listeners, link) { | |
2455 | if (!listener->log_sync) { | |
2456 | continue; | |
2457 | } | |
2458 | as = listener->address_space; | |
2459 | view = address_space_get_flatview(as); | |
2460 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2461 | if (fr->dirty_log_mask) { | |
2462 | MemoryRegionSection mrs = section_from_flat_range(fr, view); | |
2463 | ||
2464 | listener->log_sync(listener, &mrs); | |
2465 | } | |
2466 | } | |
2467 | flatview_unref(view); | |
2468 | } | |
2469 | } | |
2470 | ||
2471 | static VMChangeStateEntry *vmstate_change; | |
2472 | ||
2473 | void memory_global_dirty_log_start(void) | |
2474 | { | |
2475 | if (vmstate_change) { | |
2476 | qemu_del_vm_change_state_handler(vmstate_change); | |
2477 | vmstate_change = NULL; | |
2478 | } | |
2479 | ||
2480 | global_dirty_log = true; | |
2481 | ||
2482 | MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); | |
2483 | ||
2484 | /* Refresh DIRTY_LOG_MIGRATION bit. */ | |
2485 | memory_region_transaction_begin(); | |
2486 | memory_region_update_pending = true; | |
2487 | memory_region_transaction_commit(); | |
2488 | } | |
2489 | ||
2490 | static void memory_global_dirty_log_do_stop(void) | |
2491 | { | |
2492 | global_dirty_log = false; | |
2493 | ||
2494 | /* Refresh DIRTY_LOG_MIGRATION bit. */ | |
2495 | memory_region_transaction_begin(); | |
2496 | memory_region_update_pending = true; | |
2497 | memory_region_transaction_commit(); | |
2498 | ||
2499 | MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); | |
2500 | } | |
2501 | ||
2502 | static void memory_vm_change_state_handler(void *opaque, int running, | |
2503 | RunState state) | |
2504 | { | |
2505 | if (running) { | |
2506 | memory_global_dirty_log_do_stop(); | |
2507 | ||
2508 | if (vmstate_change) { | |
2509 | qemu_del_vm_change_state_handler(vmstate_change); | |
2510 | vmstate_change = NULL; | |
2511 | } | |
2512 | } | |
2513 | } | |
2514 | ||
2515 | void memory_global_dirty_log_stop(void) | |
2516 | { | |
2517 | if (!runstate_is_running()) { | |
2518 | if (vmstate_change) { | |
2519 | return; | |
2520 | } | |
2521 | vmstate_change = qemu_add_vm_change_state_handler( | |
2522 | memory_vm_change_state_handler, NULL); | |
2523 | return; | |
2524 | } | |
2525 | ||
2526 | memory_global_dirty_log_do_stop(); | |
2527 | } | |
2528 | ||
2529 | static void listener_add_address_space(MemoryListener *listener, | |
2530 | AddressSpace *as) | |
2531 | { | |
2532 | FlatView *view; | |
2533 | FlatRange *fr; | |
2534 | ||
2535 | if (listener->begin) { | |
2536 | listener->begin(listener); | |
2537 | } | |
2538 | if (global_dirty_log) { | |
2539 | if (listener->log_global_start) { | |
2540 | listener->log_global_start(listener); | |
2541 | } | |
2542 | } | |
2543 | ||
2544 | view = address_space_get_flatview(as); | |
2545 | FOR_EACH_FLAT_RANGE(fr, view) { | |
2546 | MemoryRegionSection section = { | |
2547 | .mr = fr->mr, | |
2548 | .fv = view, | |
2549 | .offset_within_region = fr->offset_in_region, | |
2550 | .size = fr->addr.size, | |
2551 | .offset_within_address_space = int128_get64(fr->addr.start), | |
2552 | .readonly = fr->readonly, | |
2553 | }; | |
2554 | if (fr->dirty_log_mask && listener->log_start) { | |
2555 | listener->log_start(listener, §ion, 0, fr->dirty_log_mask); | |
2556 | } | |
2557 | if (listener->region_add) { | |
2558 | listener->region_add(listener, §ion); | |
2559 | } | |
2560 | } | |
2561 | if (listener->commit) { | |
2562 | listener->commit(listener); | |
2563 | } | |
2564 | flatview_unref(view); | |
2565 | } | |
2566 | ||
2567 | void memory_listener_register(MemoryListener *listener, AddressSpace *as) | |
2568 | { | |
2569 | MemoryListener *other = NULL; | |
2570 | ||
2571 | listener->address_space = as; | |
2572 | if (QTAILQ_EMPTY(&memory_listeners) | |
2573 | || listener->priority >= QTAILQ_LAST(&memory_listeners, | |
2574 | memory_listeners)->priority) { | |
2575 | QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); | |
2576 | } else { | |
2577 | QTAILQ_FOREACH(other, &memory_listeners, link) { | |
2578 | if (listener->priority < other->priority) { | |
2579 | break; | |
2580 | } | |
2581 | } | |
2582 | QTAILQ_INSERT_BEFORE(other, listener, link); | |
2583 | } | |
2584 | ||
2585 | if (QTAILQ_EMPTY(&as->listeners) | |
2586 | || listener->priority >= QTAILQ_LAST(&as->listeners, | |
2587 | memory_listeners)->priority) { | |
2588 | QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); | |
2589 | } else { | |
2590 | QTAILQ_FOREACH(other, &as->listeners, link_as) { | |
2591 | if (listener->priority < other->priority) { | |
2592 | break; | |
2593 | } | |
2594 | } | |
2595 | QTAILQ_INSERT_BEFORE(other, listener, link_as); | |
2596 | } | |
2597 | ||
2598 | listener_add_address_space(listener, as); | |
2599 | } | |
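
/*
 * Illustrative sketch (not part of this file): a minimal listener that logs
 * topology changes of the system address space.  Registration itself
 * replays region_add for every range already mapped (see
 * listener_add_address_space above):
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         printf("mapped %s @ 0x%" PRIx64 "\n",
 *                memory_region_name(s->mr),
 *                (uint64_t)s->offset_within_address_space);
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */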
2600 | ||
2601 | void memory_listener_unregister(MemoryListener *listener) | |
2602 | { | |
2603 | if (!listener->address_space) { | |
2604 | return; | |
2605 | } | |
2606 | ||
2607 | QTAILQ_REMOVE(&memory_listeners, listener, link); | |
2608 | QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); | |
2609 | listener->address_space = NULL; | |
2610 | } | |
2611 | ||
2612 | bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr) | |
2613 | { | |
2614 | void *host; | |
2615 | unsigned size = 0; | |
2616 | unsigned offset = 0; | |
2617 | Object *new_interface; | |
2618 | ||
2619 | if (!mr || !mr->ops->request_ptr) { | |
2620 | return false; | |
2621 | } | |
2622 | ||
2623 | /* | |
2624 | * Avoid an update if the request_ptr callback calls | |
2625 | * memory_region_invalidate_mmio_ptr, which seems likely when the | |
2626 | * returned pointer backs a cache. | |
2627 | */ | |
2628 | memory_region_transaction_begin(); | |
2629 | ||
2630 | host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset); | |
2631 | ||
2632 | if (!host || !size) { | |
2633 | memory_region_transaction_commit(); | |
2634 | return false; | |
2635 | } | |
2636 | ||
2637 | new_interface = object_new("mmio_interface"); | |
2638 | qdev_prop_set_uint64(DEVICE(new_interface), "start", offset); | |
2639 | qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1); | |
2640 | qdev_prop_set_bit(DEVICE(new_interface), "ro", true); | |
2641 | qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host); | |
2642 | qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr); | |
2643 | object_property_set_bool(OBJECT(new_interface), true, "realized", NULL); | |
2644 | ||
2645 | memory_region_transaction_commit(); | |
2646 | return true; | |
2647 | } | |
2648 | ||
2649 | typedef struct MMIOPtrInvalidate { | |
2650 | MemoryRegion *mr; | |
2651 | hwaddr offset; | |
2652 | unsigned size; | |
2653 | int busy; | |
2654 | int allocated; | |
2655 | } MMIOPtrInvalidate; | |
2656 | ||
2657 | #define MAX_MMIO_INVALIDATE 10 | |
2658 | static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE]; | |
2659 | ||
2660 | static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu, | |
2661 | run_on_cpu_data data) | |
2662 | { | |
2663 | MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr; | |
2664 | MemoryRegion *mr = invalidate_data->mr; | |
2665 | hwaddr offset = invalidate_data->offset; | |
2666 | unsigned size = invalidate_data->size; | |
2667 | MemoryRegionSection section = memory_region_find(mr, offset, size); | |
2668 | ||
2669 | qemu_mutex_lock_iothread(); | |
2670 | ||
2671 | /* Reset the dirty bit so the invalidation is not triggered again later. */ | |
2672 | cpu_physical_memory_test_and_clear_dirty(offset, size, 1); | |
2673 | ||
2674 | if (section.mr != mr) { | |
2675 | /* memory_region_find adds a ref on section.mr */ | |
2676 | memory_region_unref(section.mr); | |
2677 | if (MMIO_INTERFACE(section.mr->owner)) { | |
2678 | /* We found the interface; just drop it. */ | |
2679 | object_property_set_bool(section.mr->owner, false, "realized", | |
2680 | NULL); | |
2681 | object_unref(section.mr->owner); | |
2682 | object_unparent(section.mr->owner); | |
2683 | } | |
2684 | } | |
2685 | ||
2686 | qemu_mutex_unlock_iothread(); | |
2687 | ||
2688 | if (invalidate_data->allocated) { | |
2689 | g_free(invalidate_data); | |
2690 | } else { | |
2691 | invalidate_data->busy = 0; | |
2692 | } | |
2693 | } | |
2694 | ||
2695 | void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset, | |
2696 | unsigned size) | |
2697 | { | |
2698 | size_t i; | |
2699 | MMIOPtrInvalidate *invalidate_data = NULL; | |
2700 | ||
2701 | for (i = 0; i < MAX_MMIO_INVALIDATE; i++) { | |
2702 | if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) { | |
2703 | invalidate_data = &mmio_ptr_invalidate_list[i]; | |
2704 | break; | |
2705 | } | |
2706 | } | |
2707 | ||
2708 | if (!invalidate_data) { | |
2709 | invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate)); | |
2710 | invalidate_data->allocated = 1; | |
2711 | } | |
2712 | ||
2713 | invalidate_data->mr = mr; | |
2714 | invalidate_data->offset = offset; | |
2715 | invalidate_data->size = size; | |
2716 | ||
2717 | async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr, | |
2718 | RUN_ON_CPU_HOST_PTR(invalidate_data)); | |
2719 | } | |
2720 | ||
2721 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) | |
2722 | { | |
2723 | memory_region_ref(root); | |
2724 | memory_region_transaction_begin(); | |
2725 | as->ref_count = 1; | |
2726 | as->root = root; | |
2727 | as->malloced = false; | |
2728 | as->current_map = NULL; | |
2729 | as->ioeventfd_nb = 0; | |
2730 | as->ioeventfds = NULL; | |
2731 | QTAILQ_INIT(&as->listeners); | |
2732 | QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); | |
2733 | as->name = g_strdup(name ? name : "anonymous"); | |
2734 | memory_region_update_pending |= root->enabled; | |
2735 | memory_region_transaction_commit(); | |
2736 | } | |
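
/*
 * Illustrative sketch (not part of this file, and only one possible
 * arrangement): a per-device DMA view is an AddressSpace rooted at some
 * MemoryRegion, here an alias of the whole system memory:
 *
 *     memory_region_init_alias(&s->dma_root, OBJECT(s), "my-dma-root",
 *                              get_system_memory(), 0, UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "my-dev-dma");
 */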
2737 | ||
2738 | static void do_address_space_destroy(AddressSpace *as) | |
2739 | { | |
2740 | bool do_free = as->malloced; | |
2741 | ||
2742 | assert(QTAILQ_EMPTY(&as->listeners)); | |
2743 | ||
2744 | flatview_unref(as->current_map); | |
2745 | g_free(as->name); | |
2746 | g_free(as->ioeventfds); | |
2747 | memory_region_unref(as->root); | |
2748 | if (do_free) { | |
2749 | g_free(as); | |
2750 | } | |
2751 | } | |
2752 | ||
2753 | AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name) | |
2754 | { | |
2755 | AddressSpace *as; | |
2756 | ||
2757 | as = g_malloc0(sizeof *as); | |
2758 | address_space_init(as, root, name); | |
2759 | as->malloced = true; | |
2760 | return as; | |
2761 | } | |
2762 | ||
2763 | void address_space_destroy(AddressSpace *as) | |
2764 | { | |
2765 | MemoryRegion *root = as->root; | |
2766 | ||
2767 | as->ref_count--; | |
2768 | if (as->ref_count) { | |
2769 | return; | |
2770 | } | |
2771 | /* Flush out anything from MemoryListeners listening in on this */ | |
2772 | memory_region_transaction_begin(); | |
2773 | as->root = NULL; | |
2774 | memory_region_transaction_commit(); | |
2775 | QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); | |
2776 | ||
2777 | /* At this point, as->dispatch and as->current_map are dummy | |
2778 | * entries that the guest should never use. Wait for the old | |
2779 | * values to expire before freeing the data. | |
2780 | */ | |
2781 | as->root = root; | |
2782 | call_rcu(as, do_address_space_destroy, rcu); | |
2783 | } | |
2784 | ||
2785 | static const char *memory_region_type(MemoryRegion *mr) | |
2786 | { | |
2787 | if (memory_region_is_ram_device(mr)) { | |
2788 | return "ramd"; | |
2789 | } else if (memory_region_is_romd(mr)) { | |
2790 | return "romd"; | |
2791 | } else if (memory_region_is_rom(mr)) { | |
2792 | return "rom"; | |
2793 | } else if (memory_region_is_ram(mr)) { | |
2794 | return "ram"; | |
2795 | } else { | |
2796 | return "i/o"; | |
2797 | } | |
2798 | } | |
2799 | ||
2800 | typedef struct MemoryRegionList MemoryRegionList; | |
2801 | ||
2802 | struct MemoryRegionList { | |
2803 | const MemoryRegion *mr; | |
2804 | QTAILQ_ENTRY(MemoryRegionList) mrqueue; | |
2805 | }; | |
2806 | ||
2807 | typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead; | |
2808 | ||
2809 | #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ | |
2810 | int128_sub((size), int128_one())) : 0) | |
2811 | #define MTREE_INDENT " " | |
2812 | ||
2813 | static void mtree_print_mr(fprintf_function mon_printf, void *f, | |
2814 | const MemoryRegion *mr, unsigned int level, | |
2815 | hwaddr base, | |
2816 | MemoryRegionListHead *alias_print_queue) | |
2817 | { | |
2818 | MemoryRegionList *new_ml, *ml, *next_ml; | |
2819 | MemoryRegionListHead submr_print_queue; | |
2820 | const MemoryRegion *submr; | |
2821 | unsigned int i; | |
2822 | hwaddr cur_start, cur_end; | |
2823 | ||
2824 | if (!mr) { | |
2825 | return; | |
2826 | } | |
2827 | ||
2828 | for (i = 0; i < level; i++) { | |
2829 | mon_printf(f, MTREE_INDENT); | |
2830 | } | |
2831 | ||
2832 | cur_start = base + mr->addr; | |
2833 | cur_end = cur_start + MR_SIZE(mr->size); | |
2834 | ||
2835 | /* | |
2836 | * Try to detect overflow of the memory region's range. This should | |
2837 | * never happen normally. When it does, print a marker to warn | |
2838 | * whoever is reading the output. | |
2839 | */ | |
2840 | if (cur_start < base || cur_end < cur_start) { | |
2841 | mon_printf(f, "[DETECTED OVERFLOW!] "); | |
2842 | } | |
2843 | ||
2844 | if (mr->alias) { | |
2845 | MemoryRegionList *ml; | |
2846 | bool found = false; | |
2847 | ||
2848 | /* check if the alias is already in the queue */ | |
2849 | QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { | |
2850 | if (ml->mr == mr->alias) { | |
2851 | found = true; | |
2852 | } | |
2853 | } | |
2854 | ||
2855 | if (!found) { | |
2856 | ml = g_new(MemoryRegionList, 1); | |
2857 | ml->mr = mr->alias; | |
2858 | QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); | |
2859 | } | |
2860 | mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx | |
2861 | " (prio %d, %s): alias %s @%s " TARGET_FMT_plx | |
2862 | "-" TARGET_FMT_plx "%s\n", | |
2863 | cur_start, cur_end, | |
2864 | mr->priority, | |
2865 | memory_region_type((MemoryRegion *)mr), | |
2866 | memory_region_name(mr), | |
2867 | memory_region_name(mr->alias), | |
2868 | mr->alias_offset, | |
2869 | mr->alias_offset + MR_SIZE(mr->size), | |
2870 | mr->enabled ? "" : " [disabled]"); | |
2871 | } else { | |
2872 | mon_printf(f, | |
2873 | TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n", | |
2874 | cur_start, cur_end, | |
2875 | mr->priority, | |
2876 | memory_region_type((MemoryRegion *)mr), | |
2877 | memory_region_name(mr), | |
2878 | mr->enabled ? "" : " [disabled]"); | |
2879 | } | |
2880 | ||
2881 | QTAILQ_INIT(&submr_print_queue); | |
2882 | ||
2883 | QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { | |
2884 | new_ml = g_new(MemoryRegionList, 1); | |
2885 | new_ml->mr = submr; | |
2886 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { | |
2887 | if (new_ml->mr->addr < ml->mr->addr || | |
2888 | (new_ml->mr->addr == ml->mr->addr && | |
2889 | new_ml->mr->priority > ml->mr->priority)) { | |
2890 | QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); | |
2891 | new_ml = NULL; | |
2892 | break; | |
2893 | } | |
2894 | } | |
2895 | if (new_ml) { | |
2896 | QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); | |
2897 | } | |
2898 | } | |
2899 | ||
2900 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { | |
2901 | mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start, | |
2902 | alias_print_queue); | |
2903 | } | |
2904 | ||
2905 | QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { | |
2906 | g_free(ml); | |
2907 | } | |
2908 | } | |
2909 | ||
2910 | static void mtree_print_flatview(fprintf_function p, void *f, | |
2911 | AddressSpace *as) | |
2912 | { | |
2913 | FlatView *view = address_space_get_flatview(as); | |
2914 | FlatRange *range = &view->ranges[0]; | |
2915 | MemoryRegion *mr; | |
2916 | int n = view->nr; | |
2917 | ||
2918 | if (n <= 0) { | |
2919 | p(f, MTREE_INDENT "No rendered FlatView for " | |
2920 | "address space '%s'\n", as->name); | |
2921 | flatview_unref(view); | |
2922 | return; | |
2923 | } | |
2924 | ||
2925 | while (n--) { | |
2926 | mr = range->mr; | |
2927 | if (range->offset_in_region) { | |
2928 | p(f, MTREE_INDENT TARGET_FMT_plx "-" | |
2929 | TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n", | |
2930 | int128_get64(range->addr.start), | |
2931 | int128_get64(range->addr.start) + MR_SIZE(range->addr.size), | |
2932 | mr->priority, | |
2933 | range->readonly ? "rom" : memory_region_type(mr), | |
2934 | memory_region_name(mr), | |
2935 | range->offset_in_region); | |
2936 | } else { | |
2937 | p(f, MTREE_INDENT TARGET_FMT_plx "-" | |
2938 | TARGET_FMT_plx " (prio %d, %s): %s\n", | |
2939 | int128_get64(range->addr.start), | |
2940 | int128_get64(range->addr.start) + MR_SIZE(range->addr.size), | |
2941 | mr->priority, | |
2942 | range->readonly ? "rom" : memory_region_type(mr), | |
2943 | memory_region_name(mr)); | |
2944 | } | |
2945 | range++; | |
2946 | } | |
2947 | ||
2948 | flatview_unref(view); | |
2949 | } | |
2950 | ||
2951 | void mtree_info(fprintf_function mon_printf, void *f, bool flatview) | |
2952 | { | |
2953 | MemoryRegionListHead ml_head; | |
2954 | MemoryRegionList *ml, *ml2; | |
2955 | AddressSpace *as; | |
2956 | ||
2957 | if (flatview) { | |
2958 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
2959 | mon_printf(f, "address-space (flat view): %s\n", as->name); | |
2960 | mtree_print_flatview(mon_printf, f, as); | |
2961 | mon_printf(f, "\n"); | |
2962 | } | |
2963 | return; | |
2964 | } | |
2965 | ||
2966 | QTAILQ_INIT(&ml_head); | |
2967 | ||
2968 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
2969 | mon_printf(f, "address-space: %s\n", as->name); | |
2970 | mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head); | |
2971 | mon_printf(f, "\n"); | |
2972 | } | |
2973 | ||
2974 | /* print aliased regions */ | |
2975 | QTAILQ_FOREACH(ml, &ml_head, mrqueue) { | |
2976 | mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr)); | |
2977 | mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head); | |
2978 | mon_printf(f, "\n"); | |
2979 | } | |
2980 | ||
2981 | QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { | |
2982 | g_free(ml); | |
2983 | } | |
2984 | } | |
2985 | ||
2986 | void memory_region_init_ram(MemoryRegion *mr, | |
2987 | struct Object *owner, | |
2988 | const char *name, | |
2989 | uint64_t size, | |
2990 | Error **errp) | |
2991 | { | |
2992 | DeviceState *owner_dev; | |
2993 | Error *err = NULL; | |
2994 | ||
2995 | memory_region_init_ram_nomigrate(mr, owner, name, size, &err); | |
2996 | if (err) { | |
2997 | error_propagate(errp, err); | |
2998 | return; | |
2999 | } | |
3000 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3001 | * We only want the owner here for the purposes of defining a | |
3002 | * unique name for migration. TODO: Ideally we should implement | |
3003 | * a naming scheme for Objects which are not DeviceStates, in | |
3004 | * which case we can relax this restriction. | |
3005 | */ | |
3006 | owner_dev = DEVICE(owner); | |
3007 | vmstate_register_ram(mr, owner_dev); | |
3008 | } | |
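
/*
 * Illustrative sketch (not part of this file): typical device-side usage;
 * per the comment above, the owner must be NULL or a DeviceState.
 * VRAM_SIZE and VRAM_BASE are hypothetical:
 *
 *     memory_region_init_ram(&s->vram, OBJECT(s), "my-dev.vram",
 *                            VRAM_SIZE, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), VRAM_BASE, &s->vram);
 */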
3009 | ||
3010 | void memory_region_init_rom(MemoryRegion *mr, | |
3011 | struct Object *owner, | |
3012 | const char *name, | |
3013 | uint64_t size, | |
3014 | Error **errp) | |
3015 | { | |
3016 | DeviceState *owner_dev; | |
3017 | Error *err = NULL; | |
3018 | ||
3019 | memory_region_init_rom_nomigrate(mr, owner, name, size, &err); | |
3020 | if (err) { | |
3021 | error_propagate(errp, err); | |
3022 | return; | |
3023 | } | |
3024 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3025 | * We only want the owner here for the purposes of defining a | |
3026 | * unique name for migration. TODO: Ideally we should implement | |
3027 | * a naming scheme for Objects which are not DeviceStates, in | |
3028 | * which case we can relax this restriction. | |
3029 | */ | |
3030 | owner_dev = DEVICE(owner); | |
3031 | vmstate_register_ram(mr, owner_dev); | |
3032 | } | |
3033 | ||
3034 | void memory_region_init_rom_device(MemoryRegion *mr, | |
3035 | struct Object *owner, | |
3036 | const MemoryRegionOps *ops, | |
3037 | void *opaque, | |
3038 | const char *name, | |
3039 | uint64_t size, | |
3040 | Error **errp) | |
3041 | { | |
3042 | DeviceState *owner_dev; | |
3043 | Error *err = NULL; | |
3044 | ||
3045 | memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, | |
3046 | name, size, &err); | |
3047 | if (err) { | |
3048 | error_propagate(errp, err); | |
3049 | return; | |
3050 | } | |
3051 | /* This will assert if owner is neither NULL nor a DeviceState. | |
3052 | * We only want the owner here for the purposes of defining a | |
3053 | * unique name for migration. TODO: Ideally we should implement | |
3054 | * a naming scheme for Objects which are not DeviceStates, in | |
3055 | * which case we can relax this restriction. | |
3056 | */ | |
3057 | owner_dev = DEVICE(owner); | |
3058 | vmstate_register_ram(mr, owner_dev); | |
3059 | } | |
3060 | ||
3061 | static const TypeInfo memory_region_info = { | |
3062 | .parent = TYPE_OBJECT, | |
3063 | .name = TYPE_MEMORY_REGION, | |
3064 | .instance_size = sizeof(MemoryRegion), | |
3065 | .instance_init = memory_region_initfn, | |
3066 | .instance_finalize = memory_region_finalize, | |
3067 | }; | |
3068 | ||
3069 | static const TypeInfo iommu_memory_region_info = { | |
3070 | .parent = TYPE_MEMORY_REGION, | |
3071 | .name = TYPE_IOMMU_MEMORY_REGION, | |
3072 | .class_size = sizeof(IOMMUMemoryRegionClass), | |
3073 | .instance_size = sizeof(IOMMUMemoryRegion), | |
3074 | .instance_init = iommu_memory_region_initfn, | |
3075 | .abstract = true, | |
3076 | }; | |
3077 | ||
3078 | static void memory_register_types(void) | |
3079 | { | |
3080 | type_register_static(&memory_region_info); | |
3081 | type_register_static(&iommu_memory_region_info); | |
3082 | } | |
3083 | ||
3084 | type_init(memory_register_types) |