/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
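
/* Example of the off-by-one trick above: intersecting [0xf000, size 0x1000)
 * with [0xf800, size 0x1000) compares last covered bytes (0xffff vs 0x107ff)
 * and yields [0xf800, size 0x800).  Working with the last covered byte rather
 * than the one-past-the-end address keeps the arithmetic from wrapping when a
 * range ends exactly at 2^64.
 */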

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
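
/* For example, two flat ranges [0x1000, size 0x1000) and [0x2000, size 0x1000)
 * that map consecutive offsets of the same region with the same dirty-log mask
 * collapse into a single [0x1000, size 0x2000) range.
 */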

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}
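
/* Illustrative example: with a RAM region covering [0x0, 0x10000) and a
 * higher-priority MMIO sibling covering [0x1000, 0x2000), the MMIO region is
 * rendered first, so the RAM only fills the gaps around it and the flattened
 * view ends up with three ranges:
 *
 *   [0x0,    0x1000)   RAM,  offset_in_region 0x0
 *   [0x1000, 0x2000)   MMIO, offset_in_region 0x0
 *   [0x2000, 0x10000)  RAM,  offset_in_region 0x2000
 */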

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
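    /* Concretely: a range only in the old view is unregistered, a range
     * present in both views stays registered (only its dirty logging state
     * may need toggling), and a range only in the new view is registered.
     */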
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                cpu_physical_log_stop(frnew->addr.start, frnew->addr.size);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                cpu_physical_log_start(frnew->addr.start, frnew->addr.size);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM. Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             frnew->dirty_log_mask);
            ++inew;
        }
    }
    current_memory_map = new_view;
    flatview_destroy(&old_view);
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                        target_phys_addr_t addr,
                                        unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as meaning "any access size is valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}
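
/* Illustrative example: a 4-byte read from a device whose impl.max_access_size
 * is 2 is issued as two 2-byte reads, at addr and addr + 2, with the partial
 * results masked, shifted and OR-ed together (little-endian assembly only for
 * now, as the FIXME above notes).
 */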

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
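
/* The tables above supply the byte/word/long callbacks that
 * cpu_register_io_memory() expects; each entry forwards to the region's
 * ->read/->write ops through the size-generic thunks.
 */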

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
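
/* Illustrative use of aliasing (names hypothetical): to make the first 2MB of
 * a RAM region visible again at another address, one might do:
 *
 *   memory_region_init_alias(&window, "ram-window", &ram, 0, 0x200000);
 *   memory_region_add_subregion(&parent, 0xa0000000, &window);
 *
 * The alias has no storage of its own; render_memory_region() follows
 * ->alias and ->alias_offset back to the original region.
 */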

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
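
/* Coalescing marks part or all of an MMIO region as a candidate for batched
 * writes (e.g. KVM's coalesced MMIO buffer): memory_region_set_coalescing()
 * covers the whole region, memory_region_add_coalescing() only the given
 * [offset, offset + size) window.
 */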

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
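
/* Illustrative use (names hypothetical): overlaying a higher-priority region
 * on top of RAM without triggering the collision warning above:
 *
 *   memory_region_add_subregion(&system, 0, &ram);
 *   memory_region_add_subregion_overlap(&system, 0xf0000, &bios, 1);
 *
 * Priority 1 sorts &bios ahead of &ram, so it obscures the RAM in that
 * window when the view is flattened.
 */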

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    root_memory_region = mr;
    memory_region_update_topology();
}