/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
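
/* Illustrative only: intersecting [0x1000, +0x2000) with [0x1800, +0x4000)
 * yields [0x1800, +0x1800).  Working with (end - 1), the last byte covered,
 * keeps the arithmetic inside uint64_t even for a range that reaches the
 * very top of the 64-bit address space, where addrrange_end() itself would
 * wrap to zero.
 */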

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
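
/* Sketch of intended usage (do_something() is a placeholder; the real
 * callers appear further down, e.g. memory_region_update_coalesced_range()):
 *
 *     FlatRange *fr;
 *
 *     FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
 *         do_something(fr->mr, fr->addr.start, fr->addr.size);
 *     }
 */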

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}
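
/* The array grows by doubling (with a floor of ten entries) and insertion
 * shifts the tail with memmove(), so the view stays contiguous and, provided
 * callers pass the right position, sorted by FlatRange.addr.start.
 */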

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
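/* Outline of the algorithm, as implemented below: translate @mr to an
 * absolute address, clip it against @clip, and bail out if the two do not
 * intersect.  An alias simply redirects rendering into its target at the
 * adjusted offset.  Otherwise the subregions, kept in descending priority
 * order, are rendered first, and whatever gaps they leave in the view are
 * then filled with @mr itself (provided it has a ram_addr to back it).
 */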
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));

    return view;
}

static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
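    /* The two views are walked in lockstep, like a sorted-list merge: a
     * range only in the old view is unregistered, a range present in both
     * is left alone (modulo dirty logging, still a FIXME below), and a
     * range only in the new view is registered with the core.
     */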
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            ++iold;
            ++inew;
            /* FIXME: dirty logging */
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM. Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             0);
            ++inew;
        }
    }
    current_memory_map = new_view;
    flatview_destroy(&old_view);
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "any size is valid", for compatibility
     * with ops that leave .valid unspecified.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

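/* The read/write thunks below bridge the old cpu_register_io_memory()
 * callback interface (fixed 1/2/4 byte accessors) to MemoryRegionOps.
 * A guest access is broken into pieces no larger than the ops' declared
 * implementation width, each piece is issued separately, and the results
 * are reassembled; big-endian assembly is still a FIXME.
 */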
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

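/* cpu_register_io_memory() expects three accessors per direction, indexed
 * by log2 of the access size (byte, word, long); these tables provide them.
 */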
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}
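
/* Sketch of how a device might use this, assuming the MemoryRegionOps layout
 * declared in memory.h (read/write callbacks taking the opaque pointer, an
 * offset into the region and an access size); names such as mydev_read and
 * mydev_write are hypothetical:
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         return 0;
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *     };
 *
 *     memory_region_init_io(&mydev_region, &mydev_ops, mydev,
 *                           "mydev-mmio", 0x1000);
 */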

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
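
/* An alias contributes no memory of its own: render_memory_region() above
 * redirects it into @orig, so the alias behaves as a @size-byte window onto
 * @orig starting at @offset.  Mapping the same RAM at two guest addresses,
 * for instance, takes one RAM region plus one alias of it.
 */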

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    /* FIXME */
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    /* FIXME */
    return true;
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    /* FIXME */
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    /* FIXME */
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    /* FIXME */
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}
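
/* Coalesced MMIO lets an accelerator (KVM, in practice) buffer writes to the
 * registered ranges and deliver them in a batch instead of exiting on every
 * store.  The ranges are recorded relative to the region; the helper above
 * translates them to absolute addresses for each FlatRange the region
 * currently occupies and (re)registers them with the core.
 */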

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
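
/* Sketch of overlapping subregions (region and device names hypothetical):
 *
 *     memory_region_add_subregion(&system_memory, 0, &ram);
 *     memory_region_add_subregion_overlap(&system_memory, 0xa0000,
 *                                         &vga_window, 1);
 *
 * Both regions cover 0xa0000; because the window was added with priority 1
 * it sits earlier in the subregion list, is rendered first, and therefore
 * obscures the RAM underneath it in the flattened view.
 */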

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}