1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #define DIRTY_MEMORY_VGA 0
20 #define DIRTY_MEMORY_CODE 1
21 #define DIRTY_MEMORY_MIGRATION 2
22 #define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
23
24 #include <stdint.h>
25 #include <stdbool.h>
26 #include "qemu-common.h"
27 #include "exec/cpu-common.h"
28 #ifndef CONFIG_USER_ONLY
29 #include "exec/hwaddr.h"
30 #endif
31 #include "qemu/queue.h"
32 #include "qemu/int128.h"
33 #include "qemu/notify.h"
34 #include "qapi/error.h"
35 #include "qom/object.h"
36 #include "qemu/rcu.h"
37
38 #define MAX_PHYS_ADDR_SPACE_BITS 62
39 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
40
41 #define TYPE_MEMORY_REGION "qemu:memory-region"
42 #define MEMORY_REGION(obj) \
43 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
44
45 typedef struct MemoryRegionOps MemoryRegionOps;
46 typedef struct MemoryRegionMmio MemoryRegionMmio;
47
48 struct MemoryRegionMmio {
49 CPUReadMemoryFunc *read[3];
50 CPUWriteMemoryFunc *write[3];
51 };
52
53 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
54
55 /* See address_space_translate: bit 0 is read, bit 1 is write. */
56 typedef enum {
57 IOMMU_NONE = 0,
58 IOMMU_RO = 1,
59 IOMMU_WO = 2,
60 IOMMU_RW = 3,
61 } IOMMUAccessFlags;
62
63 struct IOMMUTLBEntry {
64 AddressSpace *target_as;
65 hwaddr iova;
66 hwaddr translated_addr;
67 hwaddr addr_mask; /* 0xfff = 4k translation */
68 IOMMUAccessFlags perm;
69 };
70
71 /*
72 * Memory region callbacks
73 */
74 struct MemoryRegionOps {
75 /* Read from the memory region. @addr is relative to @mr; @size is
76 * in bytes. */
77 uint64_t (*read)(void *opaque,
78 hwaddr addr,
79 unsigned size);
80 /* Write to the memory region. @addr is relative to @mr; @size is
81 * in bytes. */
82 void (*write)(void *opaque,
83 hwaddr addr,
84 uint64_t data,
85 unsigned size);
86
87 enum device_endian endianness;
88 /* Guest-visible constraints: */
89 struct {
90 /* If nonzero, specifies bounds on access sizes beyond which a machine
91 * check is thrown.
92 */
93 unsigned min_access_size;
94 unsigned max_access_size;
95 /* If true, unaligned accesses are supported. Otherwise unaligned
96 * accesses throw machine checks.
97 */
98 bool unaligned;
99 /*
100 * If present, and returns #false, the transaction is not accepted
101 * by the device (and results in machine dependent behaviour such
102 * as a machine check exception).
103 */
104 bool (*accepts)(void *opaque, hwaddr addr,
105 unsigned size, bool is_write);
106 } valid;
107 /* Internal implementation constraints: */
108 struct {
109 /* If nonzero, specifies the minimum size implemented. Smaller sizes
110 * will be rounded upwards and a partial result will be returned.
111 */
112 unsigned min_access_size;
113 /* If nonzero, specifies the maximum size implemented. Larger sizes
114 * will be done as a series of accesses with smaller sizes.
115 */
116 unsigned max_access_size;
117 /* If true, unaligned accesses are supported. Otherwise all accesses
118 * are converted to (possibly multiple) naturally aligned accesses.
119 */
120 bool unaligned;
121 } impl;
122
123 /* If .read and .write are not present, old_mmio may be used for
124 * backwards compatibility with old mmio registration
125 */
126 const MemoryRegionMmio old_mmio;
127 };
128
129 typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
130
131 struct MemoryRegionIOMMUOps {
132 /* Return a TLB entry that contains a given address. */
133 IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
134 };
135
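/* Illustrative sketch (not part of the original header): a minimal translate
 * callback for MemoryRegionIOMMUOps.  The identity mapping below and the name
 * "my_iommu_translate" are hypothetical; a real IOMMU would walk its page
 * tables here.  address_space_memory is assumed to be the target.
 *
 *     static IOMMUTLBEntry my_iommu_translate(MemoryRegion *iommu,
 *                                             hwaddr addr, bool is_write)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,             // 4k translation
 *             .perm = IOMMU_RW,
 *         };
 *         return entry;
 *     }
 *
 *     static const MemoryRegionIOMMUOps my_iommu_ops = {
 *         .translate = my_iommu_translate,
 *     };
 */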
136 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
137 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
138
139 struct MemoryRegion {
140 Object parent_obj;
141 /* All fields are private - violators will be prosecuted */
142 const MemoryRegionOps *ops;
143 const MemoryRegionIOMMUOps *iommu_ops;
144 void *opaque;
145 MemoryRegion *container;
146 Int128 size;
147 hwaddr addr;
148 void (*destructor)(MemoryRegion *mr);
149 ram_addr_t ram_addr;
150 uint64_t align;
151 bool subpage;
152 bool terminates;
153 bool romd_mode;
154 bool ram;
155 bool skip_dump;
156 bool readonly; /* For RAM regions */
157 bool enabled;
158 bool rom_device;
159 bool warning_printed; /* For reservations */
160 bool flush_coalesced_mmio;
161 MemoryRegion *alias;
162 hwaddr alias_offset;
163 int32_t priority;
164 bool may_overlap;
165 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
166 QTAILQ_ENTRY(MemoryRegion) subregions_link;
167 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
168 const char *name;
169 uint8_t dirty_log_mask;
170 unsigned ioeventfd_nb;
171 MemoryRegionIoeventfd *ioeventfds;
172 NotifierList iommu_notify;
173 };
174
175 /**
176 * MemoryListener: callbacks structure for updates to the physical memory map
177 *
178 * Allows a component to adjust to changes in the guest-visible memory map.
179 * Use with memory_listener_register() and memory_listener_unregister().
180 */
181 struct MemoryListener {
182 void (*begin)(MemoryListener *listener);
183 void (*commit)(MemoryListener *listener);
184 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
185 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
186 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
187 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
188 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
189 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
190 void (*log_global_start)(MemoryListener *listener);
191 void (*log_global_stop)(MemoryListener *listener);
192 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
193 bool match_data, uint64_t data, EventNotifier *e);
194 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
195 bool match_data, uint64_t data, EventNotifier *e);
196 void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
197 hwaddr addr, hwaddr len);
198 void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
199 hwaddr addr, hwaddr len);
200 /* Lower = earlier (during add), later (during del) */
201 unsigned priority;
202 AddressSpace *address_space_filter;
203 QTAILQ_ENTRY(MemoryListener) link;
204 };
205
206 /**
207 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
208 */
209 struct AddressSpace {
210 /* All fields are private. */
211 struct rcu_head rcu;
212 char *name;
213 MemoryRegion *root;
214
215 /* Accessed via RCU. */
216 struct FlatView *current_map;
217
218 int ioeventfd_nb;
219 struct MemoryRegionIoeventfd *ioeventfds;
220 struct AddressSpaceDispatch *dispatch;
221 struct AddressSpaceDispatch *next_dispatch;
222 MemoryListener dispatch_listener;
223
224 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
225 };
226
227 /**
228 * MemoryRegionSection: describes a fragment of a #MemoryRegion
229 *
230 * @mr: the region, or %NULL if empty
231 * @address_space: the address space the region is mapped in
232 * @offset_within_region: the beginning of the section, relative to @mr's start
233 * @size: the size of the section; will not exceed @mr's boundaries
234 * @offset_within_address_space: the address of the first byte of the section
235 * relative to the region's address space
236 * @readonly: writes to this section are ignored
237 */
238 struct MemoryRegionSection {
239 MemoryRegion *mr;
240 AddressSpace *address_space;
241 hwaddr offset_within_region;
242 Int128 size;
243 hwaddr offset_within_address_space;
244 bool readonly;
245 };
246
247 /**
248 * memory_region_init: Initialize a memory region
249 *
250 * The region typically acts as a container for other memory regions. Use
251 * memory_region_add_subregion() to add subregions.
252 *
253 * @mr: the #MemoryRegion to be initialized
254 * @owner: the object that tracks the region's reference count
255 * @name: used for debugging; not visible to the user or ABI
256 * @size: size of the region; any subregions beyond this size will be clipped
257 */
258 void memory_region_init(MemoryRegion *mr,
259 struct Object *owner,
260 const char *name,
261 uint64_t size);
262
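/* Illustrative sketch (not part of the original header): setting up a pure
 * container region.  "dev" and the "pci-hole" name/size are made up for the
 * example.
 *
 *     MemoryRegion *container = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init(container, OBJECT(dev), "pci-hole", 0x20000000);
 *     // subregions are attached later with memory_region_add_subregion()
 */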
263 /**
264 * memory_region_ref: Add 1 to a memory region's reference count
265 *
266 * Whenever memory regions are accessed outside the BQL, they need to be
267 * preserved against hot-unplug. MemoryRegions actually do not have their
268 * own reference count; they piggyback on a QOM object, their "owner".
269 * This function adds a reference to the owner.
270 *
271 * All MemoryRegions must have an owner if they can disappear, even if the
272 * device they belong to operates exclusively under the BQL. This is because
273 * the region could be returned at any time by memory_region_find, and this
274 * is usually under guest control.
275 *
276 * @mr: the #MemoryRegion
277 */
278 void memory_region_ref(MemoryRegion *mr);
279
280 /**
281 * memory_region_unref: Remove 1 from a memory region's reference count
282 *
283 * Whenever memory regions are accessed outside the BQL, they need to be
284 * preserved against hot-unplug. MemoryRegions actually do not have their
285 * own reference count; they piggyback on a QOM object, their "owner".
286 * This function removes a reference to the owner and possibly destroys it.
287 *
288 * @mr: the #MemoryRegion
289 */
290 void memory_region_unref(MemoryRegion *mr);
291
292 /**
293 * memory_region_init_io: Initialize an I/O memory region.
294 *
295 * Accesses into the region will cause the callbacks in @ops to be called.
296 * If @size is nonzero, subregions will be clipped to @size.
297 *
298 * @mr: the #MemoryRegion to be initialized.
299 * @owner: the object that tracks the region's reference count
300 * @ops: a structure containing read and write callbacks to be used when
301 * I/O is performed on the region.
302 * @opaque: passed to the read and write callbacks of the @ops structure.
303 * @name: used for debugging; not visible to the user or ABI
304 * @size: size of the region.
305 */
306 void memory_region_init_io(MemoryRegion *mr,
307 struct Object *owner,
308 const MemoryRegionOps *ops,
309 void *opaque,
310 const char *name,
311 uint64_t size);
312
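/* Illustrative sketch (not part of the original header): a device with a small
 * register bank.  "MyState", "regs", "my_mmio_read" and "my_mmio_write" are
 * hypothetical; only 32-bit accesses are accepted in this example.
 *
 *     static uint64_t my_mmio_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void my_mmio_write(void *opaque, hwaddr addr, uint64_t data,
 *                               unsigned size)
 *     {
 *         MyState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps my_mmio_ops = {
 *         .read = my_mmio_read,
 *         .write = my_mmio_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_mmio_ops, s,
 *                           "my-device-mmio", 0x1000);
 */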
313 /**
314 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
315 * region will modify memory directly.
316 *
317 * @mr: the #MemoryRegion to be initialized.
318 * @owner: the object that tracks the region's reference count
319 * @name: the name of the region.
320 * @size: size of the region.
321 * @errp: pointer to Error*, to store an error if it happens.
322 */
323 void memory_region_init_ram(MemoryRegion *mr,
324 struct Object *owner,
325 const char *name,
326 uint64_t size,
327 Error **errp);
328
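/* Illustrative sketch (not part of the original header): allocating guest RAM
 * and mapping it at address 0.  "sysmem" stands for the system memory
 * container (e.g. from get_system_memory()) and "ram_size" for the configured
 * RAM size; &error_abort is used instead of real error handling.
 *
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "pc.ram", ram_size, &error_abort);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */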
329 /**
330 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
331 * RAM. Accesses into the region will
332 * modify memory directly. Only an initial
333 * portion of this RAM is actually used.
334 * The used size can change across reboots.
335 *
336 * @mr: the #MemoryRegion to be initialized.
337 * @owner: the object that tracks the region's reference count
338 * @name: the name of the region.
339 * @size: used size of the region.
340 * @max_size: max size of the region.
341 * @resized: callback to notify owner about used size change.
342 * @errp: pointer to Error*, to store an error if it happens.
343 */
344 void memory_region_init_resizeable_ram(MemoryRegion *mr,
345 struct Object *owner,
346 const char *name,
347 uint64_t size,
348 uint64_t max_size,
349 void (*resized)(const char*,
350 uint64_t length,
351 void *host),
352 Error **errp);
353 #ifdef __linux__
354 /**
355 * memory_region_init_ram_from_file: Initialize RAM memory region with a
356 * mmap-ed backend.
357 *
358 * @mr: the #MemoryRegion to be initialized.
359 * @owner: the object that tracks the region's reference count
360 * @name: the name of the region.
361 * @size: size of the region.
362 * @share: %true if memory must be mmap-ed with the MAP_SHARED flag
363 * @path: the path in which to allocate the RAM.
364 * @errp: pointer to Error*, to store an error if it happens.
365 */
366 void memory_region_init_ram_from_file(MemoryRegion *mr,
367 struct Object *owner,
368 const char *name,
369 uint64_t size,
370 bool share,
371 const char *path,
372 Error **errp);
373 #endif
374
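/* Illustrative sketch (not part of the original header): backing guest RAM
 * with a hugetlbfs mount; the "/dev/hugepages" path is only an example.
 *
 *     Error *local_err = NULL;
 *
 *     memory_region_init_ram_from_file(mr, NULL, "hugepage-ram", ram_size,
 *                                      true, "/dev/hugepages", &local_err);
 */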
375 /**
376 * memory_region_init_ram_ptr: Initialize RAM memory region from a
377 * user-provided pointer. Accesses into the
378 * region will modify memory directly.
379 *
380 * @mr: the #MemoryRegion to be initialized.
381 * @owner: the object that tracks the region's reference count
382 * @name: the name of the region.
383 * @size: size of the region.
384 * @ptr: memory to be mapped; must contain at least @size bytes.
385 */
386 void memory_region_init_ram_ptr(MemoryRegion *mr,
387 struct Object *owner,
388 const char *name,
389 uint64_t size,
390 void *ptr);
391
392 /**
393 * memory_region_init_alias: Initialize a memory region that aliases all or a
394 * part of another memory region.
395 *
396 * @mr: the #MemoryRegion to be initialized.
397 * @owner: the object that tracks the region's reference count
398 * @name: used for debugging; not visible to the user or ABI
399 * @orig: the region to be referenced; @mr will be equivalent to
400 * @orig between @offset and @offset + @size - 1.
401 * @offset: start of the section in @orig to be referenced.
402 * @size: size of the region.
403 */
404 void memory_region_init_alias(MemoryRegion *mr,
405 struct Object *owner,
406 const char *name,
407 MemoryRegion *orig,
408 hwaddr offset,
409 uint64_t size);
410
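/* Illustrative sketch (not part of the original header): exposing the first
 * 1 MiB of a larger RAM region at a second guest-physical address, much as a
 * PC machine does for its low-memory window.  "alias", "ram" and "sysmem" are
 * assumed to have been initialized elsewhere.
 *
 *     memory_region_init_alias(alias, NULL, "ram-below-1m", ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0, alias);
 */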
411 /**
412 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
413 * handled via callbacks.
414 *
415 * @mr: the #MemoryRegion to be initialized.
416 * @owner: the object that tracks the region's reference count
417 * @ops: callbacks for write access handling.
 * @opaque: passed to the write callbacks of the @ops structure.
418 * @name: the name of the region.
419 * @size: size of the region.
420 * @errp: pointer to Error*, to store an error if it happens.
421 */
422 void memory_region_init_rom_device(MemoryRegion *mr,
423 struct Object *owner,
424 const MemoryRegionOps *ops,
425 void *opaque,
426 const char *name,
427 uint64_t size,
428 Error **errp);
429
430 /**
431 * memory_region_init_reservation: Initialize a memory region that reserves
432 * I/O space.
433 *
434 * A reservation region primarily serves debugging purposes. It claims I/O
435 * space that is not supposed to be handled by QEMU itself. Any access via
436 * the memory API will cause an abort().
437 *
438 * @mr: the #MemoryRegion to be initialized
439 * @owner: the object that tracks the region's reference count
440 * @name: used for debugging; not visible to the user or ABI
441 * @size: size of the region.
442 */
443 void memory_region_init_reservation(MemoryRegion *mr,
444 struct Object *owner,
445 const char *name,
446 uint64_t size);
447
448 /**
449 * memory_region_init_iommu: Initialize a memory region that translates
450 * addresses
451 *
452 * An IOMMU region translates addresses and forwards accesses to a target
453 * memory region.
454 *
455 * @mr: the #MemoryRegion to be initialized
456 * @owner: the object that tracks the region's reference count
457 * @ops: a function that translates addresses into the @target region
458 * @name: used for debugging; not visible to the user or ABI
459 * @size: size of the region.
460 */
461 void memory_region_init_iommu(MemoryRegion *mr,
462 struct Object *owner,
463 const MemoryRegionIOMMUOps *ops,
464 const char *name,
465 uint64_t size);
466
467 /**
468 * memory_region_owner: get a memory region's owner.
469 *
470 * @mr: the memory region being queried.
471 */
472 struct Object *memory_region_owner(MemoryRegion *mr);
473
474 /**
475 * memory_region_size: get a memory region's size.
476 *
477 * @mr: the memory region being queried.
478 */
479 uint64_t memory_region_size(MemoryRegion *mr);
480
481 /**
482 * memory_region_is_ram: check whether a memory region is random access
483 *
484 * Returns %true if the memory region is random access.
485 *
486 * @mr: the memory region being queried
487 */
488 bool memory_region_is_ram(MemoryRegion *mr);
489
490 /**
491 * memory_region_is_skip_dump: check whether a memory region should not be
492 * dumped
493 *
494 * Returns %true if the memory region should not be dumped (e.g. VFIO BAR MMAP).
495 *
496 * @mr: the memory region being queried
497 */
498 bool memory_region_is_skip_dump(MemoryRegion *mr);
499
500 /**
501 * memory_region_set_skip_dump: Set the skip_dump flag so that dumps ignore
502 * this memory region
503 *
504 * @mr: the memory region being updated
505 */
506 void memory_region_set_skip_dump(MemoryRegion *mr);
507
508 /**
509 * memory_region_is_romd: check whether a memory region is in ROMD mode
510 *
511 * Returns %true if a memory region is a ROM device and currently set to allow
512 * direct reads.
513 *
514 * @mr: the memory region being queried
515 */
516 static inline bool memory_region_is_romd(MemoryRegion *mr)
517 {
518 return mr->rom_device && mr->romd_mode;
519 }
520
521 /**
522 * memory_region_is_iommu: check whether a memory region is an iommu
523 *
524 * Returns %true if the memory region is an iommu.
525 *
526 * @mr: the memory region being queried
527 */
528 bool memory_region_is_iommu(MemoryRegion *mr);
529
530 /**
531 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
532 *
533 * @mr: the memory region that was changed
534 * @entry: the new entry in the IOMMU translation table. The entry
535 * replaces all old entries for the same virtual I/O address range.
536 * Deleted entries have .@perm == 0.
537 */
538 void memory_region_notify_iommu(MemoryRegion *mr,
539 IOMMUTLBEntry entry);
540
541 /**
542 * memory_region_register_iommu_notifier: register a notifier for changes to
543 * IOMMU translation entries.
544 *
545 * @mr: the memory region to observe
546 * @n: the notifier to be added; the notifier receives a pointer to an
547 * #IOMMUTLBEntry as the opaque value; the pointer ceases to be
548 * valid on exit from the notifier.
549 */
550 void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
551
552 /**
553 * memory_region_unregister_iommu_notifier: unregister a notifier for
554 * changes to IOMMU translation entries.
555 *
556 * @n: the notifier to be removed.
557 */
558 void memory_region_unregister_iommu_notifier(Notifier *n);
559
560 /**
561 * memory_region_name: get a memory region's name
562 *
563 * Returns the string that was used to initialize the memory region.
564 *
565 * @mr: the memory region being queried
566 */
567 const char *memory_region_name(const MemoryRegion *mr);
568
569 /**
570 * memory_region_is_logging: return whether a memory region is logging writes
571 *
572 * Returns %true if the memory region is logging writes
573 *
574 * @mr: the memory region being queried
575 */
576 bool memory_region_is_logging(MemoryRegion *mr);
577
578 /**
579 * memory_region_is_rom: check whether a memory region is ROM
580 *
581 * Returns %true if the memory region is read-only memory.
582 *
583 * @mr: the memory region being queried
584 */
585 bool memory_region_is_rom(MemoryRegion *mr);
586
587 /**
588 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
589 *
590 * Returns a file descriptor backing a file-based RAM memory region,
591 * or -1 if the region is not a file-based RAM memory region.
592 *
593 * @mr: the RAM or alias memory region being queried.
594 */
595 int memory_region_get_fd(MemoryRegion *mr);
596
597 /**
598 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
599 *
600 * Returns a host pointer to a RAM memory region (created with
601 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
602 * care.
603 *
604 * @mr: the memory region being queried.
605 */
606 void *memory_region_get_ram_ptr(MemoryRegion *mr);
607
608 /* memory_region_ram_resize: Resize a RAM region.
609 *
610 * Only legal before the guest could have detected the memory size: e.g. on
611 * incoming migration, or right after reset.
612 *
613 * @mr: a memory region created with @memory_region_init_resizeable_ram.
614 * @newsize: the new size of the region
615 * @errp: pointer to Error*, to store an error if it happens.
616 */
617 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
618 Error **errp);
619
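/* Illustrative sketch (not part of the original header): pairing
 * memory_region_init_resizeable_ram() with memory_region_ram_resize(), e.g.
 * when an incoming migration stream carries a different used size.  The
 * "tables_resized" callback, the "acpi-tables" name and the sizes are
 * hypothetical.
 *
 *     memory_region_init_resizeable_ram(mr, OBJECT(owner), "acpi-tables",
 *                                       used_len, max_len, tables_resized,
 *                                       &error_abort);
 *     ...
 *     memory_region_ram_resize(mr, new_used_len, &error_abort);
 */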
620 /**
621 * memory_region_set_log: Turn dirty logging on or off for a region.
622 *
623 * Turns dirty logging on or off for a specified client (display, migration).
624 * Only meaningful for RAM regions.
625 *
626 * @mr: the memory region being updated.
627 * @log: whether dirty logging is to be enabled or disabled.
628 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
629 * %DIRTY_MEMORY_VGA.
630 */
631 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
632
633 /**
634 * memory_region_get_dirty: Check whether a range of bytes is dirty
635 * for a specified client.
636 *
637 * Checks whether a range of bytes has been written to since the last
638 * call to memory_region_reset_dirty() with the same @client. Dirty logging
639 * must be enabled.
640 *
641 * @mr: the memory region being queried.
642 * @addr: the address (relative to the start of the region) being queried.
643 * @size: the size of the range being queried.
644 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
645 * %DIRTY_MEMORY_VGA.
646 */
647 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
648 hwaddr size, unsigned client);
649
650 /**
651 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
652 *
653 * Marks a range of bytes as dirty, after it has been dirtied outside
654 * guest code.
655 *
656 * @mr: the memory region being dirtied.
657 * @addr: the address (relative to the start of the region) being dirtied.
658 * @size: size of the range being dirtied.
659 */
660 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
661 hwaddr size);
662
663 /**
664 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
665 * for a specified client, clearing it in the process.
666 *
667 * Checks whether a range of bytes has been written to since the last
668 * call to memory_region_reset_dirty() with the same @client. Dirty logging
669 * must be enabled.
670 *
671 * @mr: the memory region being queried.
672 * @addr: the address (relative to the start of the region) being queried.
673 * @size: the size of the range being queried.
674 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
675 * %DIRTY_MEMORY_VGA.
676 */
677 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
678 hwaddr size, unsigned client);
679 /**
680 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
681 * any external TLBs (e.g. kvm)
682 *
683 * Flushes dirty information from accelerators such as kvm and vhost-net
684 * and makes it available to users of the memory API.
685 *
686 * @mr: the region being flushed.
687 */
688 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
689
690 /**
691 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
692 * client.
693 *
694 * Marks a range of pages as no longer dirty.
695 *
696 * @mr: the region being updated.
697 * @addr: the start of the subrange being cleaned.
698 * @size: the size of the subrange being cleaned.
699 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
700 * %DIRTY_MEMORY_VGA.
701 */
702 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
703 hwaddr size, unsigned client);
704
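/* Illustrative sketch (not part of the original header): a display device
 * tracking framebuffer updates with VGA dirty logging.  "fb_region" and
 * "fb_size" are hypothetical.
 *
 *     memory_region_set_log(fb_region, true, DIRTY_MEMORY_VGA);
 *     ...
 *     memory_region_sync_dirty_bitmap(fb_region);
 *     if (memory_region_get_dirty(fb_region, 0, fb_size, DIRTY_MEMORY_VGA)) {
 *         // redraw, then mark the range clean again
 *         memory_region_reset_dirty(fb_region, 0, fb_size, DIRTY_MEMORY_VGA);
 *     }
 */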
705 /**
706 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
707 *
708 * Allows a memory region to be marked as read-only (turning it into a ROM).
709 * Only useful on RAM regions.
710 *
711 * @mr: the region being updated.
712 * @readonly: whether the region is to be ROM or RAM.
713 */
714 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
715
716 /**
717 * memory_region_rom_device_set_romd: enable/disable ROMD mode
718 *
719 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
720 * switched to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
721 * device is mapped to guest memory and satisfies read access directly.
722 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
723 * Writes are always handled by the #MemoryRegion.write function.
724 *
725 * @mr: the memory region to be updated
726 * @romd_mode: %true to put the region into ROMD mode
727 */
728 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
729
730 /**
731 * memory_region_set_coalescing: Enable memory coalescing for the region.
732 *
733 * Enables writes to a region to be queued for later processing. MMIO ->write
734 * callbacks may be delayed until a non-coalesced MMIO is issued.
735 * Only useful for IO regions. Roughly similar to write-combining hardware.
736 *
737 * @mr: the memory region to be write coalesced
738 */
739 void memory_region_set_coalescing(MemoryRegion *mr);
740
741 /**
742 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
743 * a region.
744 *
745 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
746 * Multiple calls can be issued to coalesce disjoint ranges.
747 *
748 * @mr: the memory region to be updated.
749 * @offset: the start of the range within the region to be coalesced.
750 * @size: the size of the subrange to be coalesced.
751 */
752 void memory_region_add_coalescing(MemoryRegion *mr,
753 hwaddr offset,
754 uint64_t size);
755
756 /**
757 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
758 *
759 * Disables any coalescing caused by memory_region_set_coalescing() or
760 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
761 * hardware.
762 *
763 * @mr: the memory region to be updated.
764 */
765 void memory_region_clear_coalescing(MemoryRegion *mr);
766
767 /**
768 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
769 * accesses.
770 *
771 * Ensures that pending coalesced MMIO requests are flushed before the memory
772 * region is accessed. This property is automatically enabled for all regions
773 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
774 *
775 * @mr: the memory region to be updated.
776 */
777 void memory_region_set_flush_coalesced(MemoryRegion *mr);
778
779 /**
780 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
781 * accesses.
782 *
783 * Clear the automatic coalesced MMIO flushing enabled via
784 * memory_region_set_flush_coalesced. Note that this service has no effect on
785 * memory regions that have MMIO coalescing enabled for themselves. For them,
786 * automatic flushing will stop once coalescing is disabled.
787 *
788 * @mr: the memory region to be updated.
789 */
790 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
791
792 /**
793 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
794 * is written to a location.
795 *
796 * Marks a word in an IO region (initialized with memory_region_init_io())
797 * as a trigger for an eventfd event. The I/O callback will not be called.
798 * The caller must be prepared to handle failure (that is, take the required
799 * action if the callback _is_ called).
800 *
801 * @mr: the memory region being updated.
802 * @addr: the address within @mr that is to be monitored
803 * @size: the size of the access to trigger the eventfd
804 * @match_data: whether to match against @data, instead of just @addr
805 * @data: the data to match against the guest write
806 * @e: event notifier to be triggered when @addr, @size, and @data all match.
807 **/
808 void memory_region_add_eventfd(MemoryRegion *mr,
809 hwaddr addr,
810 unsigned size,
811 bool match_data,
812 uint64_t data,
813 EventNotifier *e);
814
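/* Illustrative sketch (not part of the original header): turning guest writes
 * of a "doorbell" value into an eventfd wakeup instead of an MMIO callback.
 * event_notifier_init() is assumed from "qemu/event_notifier.h"; the offset
 * and magic value are made up.
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x40, 4, true, 0x12345678,
 *                               &notifier);
 */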
815 /**
816 * memory_region_del_eventfd: Cancel an eventfd.
817 *
818 * Cancels an eventfd trigger requested by a previous
819 * memory_region_add_eventfd() call.
820 *
821 * @mr: the memory region being updated.
822 * @addr: the address within @mr that is to be monitored
823 * @size: the size of the access to trigger the eventfd
824 * @match_data: whether to match against @data, instead of just @addr
825 * @data: the data to match against the guest write
826 * @e: event notifier to be triggered when @addr, @size, and @data all match.
827 */
828 void memory_region_del_eventfd(MemoryRegion *mr,
829 hwaddr addr,
830 unsigned size,
831 bool match_data,
832 uint64_t data,
833 EventNotifier *e);
834
835 /**
836 * memory_region_add_subregion: Add a subregion to a container.
837 *
838 * Adds a subregion at @offset. The subregion may not overlap with other
839 * subregions (except for those explicitly marked as overlapping). A region
840 * may only be added once as a subregion (unless removed with
841 * memory_region_del_subregion()); use memory_region_init_alias() if you
842 * want a region to be a subregion in multiple locations.
843 *
844 * @mr: the region to contain the new subregion; must be a container
845 * initialized with memory_region_init().
846 * @offset: the offset relative to @mr where @subregion is added.
847 * @subregion: the subregion to be added.
848 */
849 void memory_region_add_subregion(MemoryRegion *mr,
850 hwaddr offset,
851 MemoryRegion *subregion);
852 /**
853 * memory_region_add_subregion_overlap: Add a subregion to a container
854 * with overlap.
855 *
856 * Adds a subregion at @offset. The subregion may overlap with other
857 * subregions. Conflicts are resolved by having a higher @priority hide a
858 * lower @priority. Subregions without priority are taken as @priority 0.
859 * A region may only be added once as a subregion (unless removed with
860 * memory_region_del_subregion()); use memory_region_init_alias() if you
861 * want a region to be a subregion in multiple locations.
862 *
863 * @mr: the region to contain the new subregion; must be a container
864 * initialized with memory_region_init().
865 * @offset: the offset relative to @mr where @subregion is added.
866 * @subregion: the subregion to be added.
867 * @priority: used for resolving overlaps; highest priority wins.
868 */
869 void memory_region_add_subregion_overlap(MemoryRegion *mr,
870 hwaddr offset,
871 MemoryRegion *subregion,
872 int priority);
873
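/* Illustrative sketch (not part of the original header): a small hierarchy in
 * which an MMIO window overlaps RAM and hides it while enabled.  "sysmem",
 * "ram" and "mmio" are assumed to have been initialized already.
 *
 *     memory_region_add_subregion(sysmem, 0, ram);                // priority 0
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000, mmio, 1);
 *     // while "mmio" is enabled it shadows the RAM underneath at 0xe0000
 */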
874 /**
875 * memory_region_get_ram_addr: Get the ram address associated with a memory
876 * region
877 *
878 * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
879 * code is being reworked.
880 */
881 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
882
883 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
884 /**
885 * memory_region_del_subregion: Remove a subregion.
886 *
887 * Removes a subregion from its container.
888 *
889 * @mr: the container to be updated.
890 * @subregion: the region being removed; must be a current subregion of @mr.
891 */
892 void memory_region_del_subregion(MemoryRegion *mr,
893 MemoryRegion *subregion);
894
895 /*
896 * memory_region_set_enabled: dynamically enable or disable a region
897 *
898 * Enables or disables a memory region. A disabled memory region
899 * ignores all accesses to itself and its subregions. It does not
900 * obscure sibling subregions with lower priority - it simply behaves as
901 * if it was removed from the hierarchy.
902 *
903 * Regions default to being enabled.
904 *
905 * @mr: the region to be updated
906 * @enabled: whether to enable or disable the region
907 */
908 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
909
910 /*
911 * memory_region_set_address: dynamically update the address of a region
912 *
913 * Dynamically updates the address of a region, relative to its container.
914 * May be used on regions that are currently part of a memory hierarchy.
915 *
916 * @mr: the region to be updated
917 * @addr: new address, relative to container region
918 */
919 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
920
921 /*
922 * memory_region_set_size: dynamically update the size of a region.
923 *
924 * Dynamically updates the size of a region.
925 *
926 * @mr: the region to be updated
927 * @size: the new size of the region.
928 */
929 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
930
931 /*
932 * memory_region_set_alias_offset: dynamically update a memory alias's offset
933 *
934 * Dynamically updates the offset into the target region that an alias points
935 * to, as if the fourth argument to memory_region_init_alias() has changed.
936 *
937 * @mr: the #MemoryRegion to be updated; should be an alias.
938 * @offset: the new offset into the target memory region
939 */
940 void memory_region_set_alias_offset(MemoryRegion *mr,
941 hwaddr offset);
942
943 /**
944 * memory_region_present: checks if an address relative to a @container
945 * translates into a #MemoryRegion within @container
946 *
947 * Answers whether a #MemoryRegion within @container covers the address
948 * @addr.
949 *
950 * @container: a #MemoryRegion within which @addr is a relative address
951 * @addr: the area within @container to be searched
952 */
953 bool memory_region_present(MemoryRegion *container, hwaddr addr);
954
955 /**
956 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
957 * into any address space.
958 *
959 * @mr: a #MemoryRegion which should be checked if it's mapped
960 */
961 bool memory_region_is_mapped(MemoryRegion *mr);
962
963 /**
964 * memory_region_find: translate an address/size relative to a
965 * MemoryRegion into a #MemoryRegionSection.
966 *
967 * Locates the first #MemoryRegion within @mr that overlaps the range
968 * given by @addr and @size.
969 *
970 * Returns a #MemoryRegionSection that describes a contiguous overlap.
971 * It will have the following characteristics:
972 * .@size = 0 iff no overlap was found
973 * .@mr is non-%NULL iff an overlap was found
974 *
975 * Remember that in the return value the @offset_within_region is
976 * relative to the returned region (in the .@mr field), not to the
977 * @mr argument.
978 *
979 * Similarly, the .@offset_within_address_space is relative to the
980 * address space that contains both regions, the passed and the
981 * returned one. However, in the special case where the @mr argument
982 * has no container (and thus is the root of the address space), the
983 * following will hold:
984 * .@offset_within_address_space >= @addr
985 * .@offset_within_address_space + .@size <= @addr + @size
986 *
987 * @mr: a MemoryRegion within which @addr is a relative address
988 * @addr: start of the area within @mr to be searched
989 * @size: size of the area to be searched
990 */
991 MemoryRegionSection memory_region_find(MemoryRegion *mr,
992 hwaddr addr, uint64_t size);
993
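/* Illustrative sketch (not part of the original header): probing what backs a
 * range of the system memory region.  It is assumed here that the caller is
 * responsible for dropping the reference taken on the returned region.
 *
 *     MemoryRegionSection section = memory_region_find(sysmem, addr, 4);
 *
 *     if (section.mr) {
 *         if (memory_region_is_ram(section.mr)) {
 *             // direct access is possible via memory_region_get_ram_ptr()
 *         }
 *         memory_region_unref(section.mr);
 *     }
 */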
994 /**
995 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
996 *
997 * Synchronizes the dirty page log for an entire address space.
998 * @as: the address space that contains the memory being synchronized
999 */
1000 void address_space_sync_dirty_bitmap(AddressSpace *as);
1001
1002 /**
1003 * memory_region_transaction_begin: Start a transaction.
1004 *
1005 * During a transaction, changes will be accumulated and made visible
1006 * only when the transaction ends (is committed).
1007 */
1008 void memory_region_transaction_begin(void);
1009
1010 /**
1011 * memory_region_transaction_commit: Commit a transaction and make changes
1012 * visible to the guest.
1013 */
1014 void memory_region_transaction_commit(void);
1015
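/* Illustrative sketch (not part of the original header): batching several
 * layout changes so that listeners see a single update, e.g. when a PCI BAR
 * moves.  "bar_mr" and "new_base" are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 */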
1016 /**
1017 * memory_listener_register: register callbacks to be called when memory
1018 * sections are mapped or unmapped into an address
1019 * space
1020 *
1021 * @listener: an object containing the callbacks to be called
1022 * @filter: if non-%NULL, only regions in this address space will be observed
1023 */
1024 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1025
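/* Illustrative sketch (not part of the original header): a listener that is
 * told about sections mapped into the system address space.  The callback
 * names are hypothetical and address_space_memory is assumed to be the system
 * address space.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *     {
 *         // e.g. set up host mappings for sec->offset_within_address_space
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */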
1026 /**
1027 * memory_listener_unregister: undo the effect of memory_listener_register()
1028 *
1029 * @listener: an object containing the callbacks to be removed
1030 */
1031 void memory_listener_unregister(MemoryListener *listener);
1032
1033 /**
1034 * memory_global_dirty_log_start: begin dirty logging for all regions
1035 */
1036 void memory_global_dirty_log_start(void);
1037
1038 /**
1039 * memory_global_dirty_log_stop: end dirty logging for all regions
1040 */
1041 void memory_global_dirty_log_stop(void);
1042
1043 void mtree_info(fprintf_function mon_printf, void *f);
1044
1045 /**
1046 * address_space_init: initializes an address space
1047 *
1048 * @as: an uninitialized #AddressSpace
1049 * @root: a #MemoryRegion that routes addresses for the address space
1050 * @name: an address space name. The name is only used for debugging
1051 * output.
1052 */
1053 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1054
1055
1056 /**
1057 * address_space_destroy: destroy an address space
1058 *
1059 * Releases all resources associated with an address space. After an address space
1060 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1061 * as well.
1062 *
1063 * @as: address space to be destroyed
1064 */
1065 void address_space_destroy(AddressSpace *as);
1066
1067 /**
1068 * address_space_rw: read from or write to an address space.
1069 *
1070 * Return true if the operation hit any unassigned memory or encountered an
1071 * IOMMU fault.
1072 *
1073 * @as: #AddressSpace to be accessed
1074 * @addr: address within that address space
1075 * @buf: buffer with the data transferred
1076 * @is_write: indicates the transfer direction
1077 */
1078 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1079 int len, bool is_write);
1080
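/* Illustrative sketch (not part of the original header): a DMA-capable device
 * model reading a descriptor from guest memory.  "desc_addr" is hypothetical
 * and address_space_memory is assumed to be the system address space.
 *
 *     uint8_t desc[16];
 *
 *     if (address_space_rw(&address_space_memory, desc_addr,
 *                          desc, sizeof(desc), false)) {
 *         // the read hit unassigned memory or an IOMMU fault
 *     }
 */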
1081 /**
1082 * address_space_write: write to address space.
1083 *
1084 * Return true if the operation hit any unassigned memory or encountered an
1085 * IOMMU fault.
1086 *
1087 * @as: #AddressSpace to be accessed
1088 * @addr: address within that address space
1089 * @buf: buffer with the data transferred
1090 */
1091 bool address_space_write(AddressSpace *as, hwaddr addr,
1092 const uint8_t *buf, int len);
1093
1094 /**
1095 * address_space_read: read from an address space.
1096 *
1097 * Return true if the operation hit any unassigned memory or encountered an
1098 * IOMMU fault.
1099 *
1100 * @as: #AddressSpace to be accessed
1101 * @addr: address within that address space
1102 * @buf: buffer with the data transferred
1103 */
1104 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
1105
1106 /* address_space_translate: translate an address range within an address space
1107 * into a #MemoryRegion and an address range within that region
1108 *
1109 * @as: #AddressSpace to be accessed
1110 * @addr: address within that address space
1111 * @xlat: pointer to address within the returned memory region section's
1112 * #MemoryRegion.
1113 * @len: pointer to length
1114 * @is_write: indicates the transfer direction
1115 */
1116 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1117 hwaddr *xlat, hwaddr *len,
1118 bool is_write);
1119
1120 /* address_space_access_valid: check for validity of accessing an address
1121 * space range
1122 *
1123 * Check whether memory is assigned to the given address space range, and
1124 * access is permitted by any IOMMU regions that are active for the address
1125 * space.
1126 *
1127 * For now, addr and len should be aligned to a page size. This limitation
1128 * will be lifted in the future.
1129 *
1130 * @as: #AddressSpace to be accessed
1131 * @addr: address within that address space
1132 * @len: length of the area to be checked
1133 * @is_write: indicates the transfer direction
1134 */
1135 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
1136
1137 /* address_space_map: map a physical memory region into a host virtual address
1138 *
1139 * May map a subset of the requested range, given by and returned in @plen.
1140 * May return %NULL if resources needed to perform the mapping are exhausted.
1141 * Use only for reads OR writes - not for read-modify-write operations.
1142 * Use cpu_register_map_client() to know when retrying the map operation is
1143 * likely to succeed.
1144 *
1145 * @as: #AddressSpace to be accessed
1146 * @addr: address within that address space
1147 * @plen: pointer to length of buffer; updated on return
1148 * @is_write: indicates the transfer direction
1149 */
1150 void *address_space_map(AddressSpace *as, hwaddr addr,
1151 hwaddr *plen, bool is_write);
1152
1153 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1154 *
1155 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
1156 * the amount of memory that was actually read or written by the caller.
1157 *
1158 * @as: #AddressSpace used
1159 * @buffer: host pointer as returned by address_space_map()
1160 * @len: buffer length as returned by address_space_map()
1161 * @access_len: amount of data actually transferred
1162 * @is_write: indicates the transfer direction
1163 */
1164 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1165 int is_write, hwaddr access_len);
1166
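/* Illustrative sketch (not part of the original header): the usual
 * map/access/unmap pattern for zero-copy access to guest memory.  "as", "gpa"
 * and "size" are hypothetical.
 *
 *     hwaddr plen = size;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *
 *     if (host) {
 *         memset(host, 0, plen);           // write at most plen bytes
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 */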
1167
1168 #endif
1169
1170 #endif