/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

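/* Two (non-empty) ranges intersect iff the later-starting one begins inside
 * the other.
 */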
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

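/* Two flat ranges can be merged when they are contiguous both in guest
 * physical address space and in their offsets into the backing region, and
 * agree on dirty logging.
 */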
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
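/* For example (illustrative values only): two 4 KiB ranges backed by the same
 * region, one at [0x0, 0x1000) with offset_in_region 0 and one at
 * [0x1000, 0x2000) with offset_in_region 0x1000, collapse into a single
 * 8 KiB range at [0x0, 0x2000).
 */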
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

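/* Recompute the flat view from the root region and program the difference
 * between the old and new views into the physical memory core: ranges only in
 * the old view become unassigned, ranges only in the new view are registered,
 * and ranges present in both merely have their dirty-logging state updated.
 */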
static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                cpu_physical_log_stop(frnew->addr.start, frnew->addr.size);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                cpu_physical_log_start(frnew->addr.start, frnew->addr.size);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             frnew->dirty_log_mask);
            ++inew;
        }
    }
    current_memory_map = new_view;
    flatview_destroy(&old_view);
}

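/* Typical board-level usage (an illustrative sketch; "sysmem", "dev", and the
 * size are hypothetical, not taken from this file):
 *
 *     static MemoryRegion ram;
 *
 *     memory_region_init_ram(&ram, dev, "board.ram", ram_size);
 *     memory_region_add_subregion(sysmem, 0, &ram);
 *
 * where "sysmem" is whatever region the board installs as root_memory_region.
 */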
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

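/* Check an access against the constraints advertised in mr->ops->valid:
 * alignment and minimum/maximum access size.
 */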
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a max_access_size of zero as "all sizes valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

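/* The *_thunk_n helpers below split an access into the sizes the device
 * implements (mr->ops->impl, defaulting to 1..4 bytes) and issue as many
 * ops->read()/ops->write() calls as needed.
 */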
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

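/* Dispatch tables exposing a MemoryRegion through the legacy
 * cpu_register_io_memory() byte/word/long callbacks.
 */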
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

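/* Re-register the coalesced MMIO ranges of @mr: for each flat range backed by
 * @mr, drop the previous registration and register every coalesced sub-range,
 * clipped to the flat range, at its current absolute address.
 */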
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

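/* Attach @subregion to @mr at @offset.  A collision warning is printed unless
 * one of the regions involved is marked may_overlap; the subregion list is
 * kept sorted by descending priority so that rendering visits higher-priority
 * regions first.
 */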
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->addr,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}