/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 * (1) When the device needs accurate synchronization of the vIOMMU page
 *     tables, it needs to register with both MAP and UNMAP notifications
 *     (the combination defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *     By accurate synchronization we mean that the notified device
 *     maintains a shadow page table and must be notified on each
 *     guest MAP (page table entry creation) and UNMAP (invalidation)
 *     event (e.g. VFIO). Both notifications must be accurate so that
 *     the shadow page table is fully in sync with the guest view.
 *
 * (2) When the device doesn't need accurate synchronization of the
 *     vIOMMU page tables, it needs to register only with UNMAP or
 *     DEVIOTLB_UNMAP notifications.
 *
 *     This applies when the device maintains a cache of IOMMU translations
 *     (IOTLB) and is able to fill that cache by requesting translations
 *     from the vIOMMU through a protocol similar to ATS (Address
 *     Translation Service).
 *
 *     Note that in this mode the vIOMMU will not maintain a shadowed
 *     page table for the address space, and the UNMAP messages can cover
 *     more than the pages that used to get mapped. The notified device
 *     should be able to handle such over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: QEMU will bail out if that is not
 * supported. When not set, the OS will do the reservation, if supported for
 * the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
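
/*
 * Example (editor's illustrative sketch, not part of the QEMU API): a
 * device model that caches IOTLB entries might register an UNMAP notifier
 * covering the whole address range; memory_region_register_iommu_notifier()
 * is declared later in this header. "vdev_iommu_unmap" and the cache
 * invalidation helper are hypothetical names.
 *
 *     static void vdev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         vdev_invalidate_range(iotlb->iova, iotlb->addr_mask);
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, vdev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n,
 *                                           &error_fatal);
 */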

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
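
/*
 * Example (editor's sketch, hypothetical device): a MemoryRegionOps for a
 * little-endian register bank that only accepts aligned 32-bit accesses.
 * "mydev_*" and MyDevState are illustrative names, not QEMU API.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 */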

typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
    /**
     * @iommu_set_iova_ranges:
     *
     * Propagate information about the usable IOVA ranges for a given IOMMU
     * memory region. Used for example to propagate host physical device
     * reserved memory region constraints to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default IOVA
     * aperture is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @iova_ranges: list of ordered IOVA ranges (at least one range)
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
                                 GList *iova_ranges,
                                 Error **errp);
};
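
/*
 * Example (editor's sketch): the shape of a trivial @translate
 * implementation that maps every 4 KiB page 1:1 with full permissions.
 * A real IOMMU would walk its translation tables here and honour @flag
 * and @iommu_idx; the function name is illustrative.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         return (IOMMUTLBEntry) {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,
 *         };
 *     }
 */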

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
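
/*
 * Example (editor's sketch, hypothetical callbacks): wiring up a listener
 * before handing it to ram_discard_manager_register_listener(), declared
 * below. "vdev_ram_populate"/"vdev_ram_discard" and their helpers are
 * illustrative names.
 *
 *     static int vdev_ram_populate(RamDiscardListener *rdl,
 *                                  MemoryRegionSection *section)
 *     {
 *         return vdev_map_section(rdl, section);
 *     }
 *
 *     static void vdev_ram_discard(RamDiscardListener *rdl,
 *                                  MemoryRegionSection *section)
 *     {
 *         vdev_unmap_section(rdl, section);
 *     }
 *
 *     RamDiscardListener rdl;
 *     ram_discard_listener_init(&rdl, vdev_ram_populate, vdev_ram_discard,
 *                               false);
 */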

typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/**
 * struct MemoryRegion: A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL, and vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     * The caller should guarantee that the synchronization with @last_stage
     * set to true is triggered exactly once, after all VCPUs have been
     * stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_start)(MemoryListener *listener);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener. It can be used in contexts where we'd like to
     * distinguish one memory listener from the others.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
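
/*
 * Example (editor's sketch): a minimal listener that logs every section
 * added to the system address space. memory_listener_register() is
 * declared later in this header; "my_region_add" is an illustrative name.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         qemu_log("add: %s\n", memory_region_name(section->mr));
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = MEMORY_LISTENER_PRIORITY_MIN,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */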

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
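
/*
 * Example (editor's sketch): counting the ranges in the current FlatView
 * of the system address space. The FlatView only stays valid under RCU
 * (or the BQL), hence the RCU read guard; names are illustrative. The
 * callback returns false so that iteration continues to the end.
 *
 *     static bool count_range(Int128 start, Int128 len,
 *                             const MemoryRegion *mr,
 *                             hwaddr offset_in_region, void *opaque)
 *     {
 *         (*(unsigned *)opaque)++;
 *         return false;
 *     }
 *
 *     unsigned nr = 0;
 *     RCU_READ_LOCK_GUARD();
 *     flatview_for_each_range(address_space_to_flatview(&address_space_memory),
 *                             count_range, &nr);
 */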

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);
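
/*
 * Example (editor's sketch): the two helpers above pair up when a caller
 * needs a section to outlive the transaction that produced it:
 *
 *     MemoryRegionSection *tmp = memory_region_section_new_copy(section);
 *     ... use tmp, possibly after the original section is gone ...
 *     memory_region_section_free_copy(tmp);
 */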

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
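
/*
 * Example (editor's sketch, reusing the hypothetical "mydev_ops" from the
 * MemoryRegionOps example above): creating a 4 KiB MMIO region and mapping
 * it into a container region. memory_region_add_subregion() is declared
 * later in this header; "s" and "sysmem" are illustrative.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x1000);
 *     memory_region_add_subregion(sysmem, 0x10000000, &s->iomem);
 */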

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.
 *                                    Accesses into the region will
 *                                    modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * the skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
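
/*
 * Example (editor's sketch): exposing the first 1 MiB of a RAM region a
 * second time at another address, a typical use for an alias. Names are
 * illustrative.
 *
 *     memory_region_init_alias(&alias_mr, OBJECT(dev), "ram-alias",
 *                              ram_mr, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0x100000000ULL, &alias_mr);
 */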

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
1595
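/*
 * Illustrative sketch (not part of this header): a realize method
 * allocating migratable device RAM. MyDeviceState, MY_DEVICE() and the
 * use of sysbus are assumptions of the example; KiB is from "qemu/units.h".
 *
 *     static void my_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDeviceState *s = MY_DEVICE(dev);
 *
 *         if (!memory_region_init_ram(&s->ram, OBJECT(dev), "my-device.ram",
 *                                     64 * KiB, errp)) {
 *             return;    // @errp has already been set
 *         }
 *         sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->ram);
 *     }
 */
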
1596 /**
1597 * memory_region_init_rom: Initialize a ROM memory region.
1598 *
1599 * This has the same effect as calling memory_region_init_ram()
1600 * and then marking the resulting region read-only with
1601 * memory_region_set_readonly(). This includes arranging for the
1602 * contents to be migrated.
1603 *
1604 * TODO: Currently we restrict @owner to being either NULL (for
1605 * global RAM regions with no owner) or devices, so that we can
1606 * give the RAM block a unique name for migration purposes.
1607 * We should lift this restriction and allow arbitrary Objects.
1608 * If you pass a non-NULL non-device @owner then we will assert.
1609 *
1610 * @mr: the #MemoryRegion to be initialized.
1611 * @owner: the object that tracks the region's reference count
1612 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1613 * must be unique within any device
1614 * @size: size of the region.
1615 * @errp: pointer to Error*, to store an error if it happens.
1616 *
1617 * Return: true on success, else false setting @errp with error.
1618 */
1619 bool memory_region_init_rom(MemoryRegion *mr,
1620 Object *owner,
1621 const char *name,
1622 uint64_t size,
1623 Error **errp);
1624
1625 /**
1626 * memory_region_init_rom_device: Initialize a ROM memory region.
1627 * Writes are handled via callbacks.
1628 *
1629 * This function initializes a memory region backed by RAM for reads
1630 * and callbacks for writes, and arranges for the RAM backing to
1631 * be migrated (by calling vmstate_register_ram()
1632 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1633 * @owner is NULL).
1634 *
1635 * TODO: Currently we restrict @owner to being either NULL (for
1636 * global RAM regions with no owner) or devices, so that we can
1637 * give the RAM block a unique name for migration purposes.
1638 * We should lift this restriction and allow arbitrary Objects.
1639 * If you pass a non-NULL non-device @owner then we will assert.
1640 *
1641 * @mr: the #MemoryRegion to be initialized.
1642 * @owner: the object that tracks the region's reference count
1643 * @ops: callbacks for write access handling (must not be NULL).
1644 * @opaque: passed to the read and write callbacks of the @ops structure.
1645 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1646 * must be unique within any device
1647 * @size: size of the region.
1648 * @errp: pointer to Error*, to store an error if it happens.
1649 */
1650 void memory_region_init_rom_device(MemoryRegion *mr,
1651 Object *owner,
1652 const MemoryRegionOps *ops,
1653 void *opaque,
1654 const char *name,
1655 uint64_t size,
1656 Error **errp);
1657
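/*
 * Illustrative sketch (not part of this header): a flash-like device whose
 * reads are satisfied directly from RAM while writes go through a callback;
 * the MyFlashState type and my_flash_* names are assumptions of the example.
 *
 *     static void my_flash_write(void *opaque, hwaddr addr,
 *                                uint64_t data, unsigned size)
 *     {
 *         MyFlashState *s = opaque;
 *         uint8_t *p = memory_region_get_ram_ptr(&s->mem);
 *
 *         p[addr] = data;    // emulate byte programming (assumes 1-byte accesses)
 *         memory_region_flush_rom_device(&s->mem, addr, size);
 *     }
 *
 *     static const MemoryRegionOps my_flash_ops = {
 *         .write = my_flash_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     // in realize:
 *     memory_region_init_rom_device(&s->mem, OBJECT(s), &my_flash_ops, s,
 *                                   "my-flash", size, errp);
 */
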
1658
1659 /**
1660 * memory_region_owner: get a memory region's owner.
1661 *
1662 * @mr: the memory region being queried.
1663 */
1664 Object *memory_region_owner(MemoryRegion *mr);
1665
1666 /**
1667 * memory_region_size: get a memory region's size.
1668 *
1669 * @mr: the memory region being queried.
1670 */
1671 uint64_t memory_region_size(MemoryRegion *mr);
1672
1673 /**
1674 * memory_region_is_ram: check whether a memory region is random access
1675 *
1676 * Returns %true if a memory region is random access.
1677 *
1678 * @mr: the memory region being queried
1679 */
1680 static inline bool memory_region_is_ram(MemoryRegion *mr)
1681 {
1682 return mr->ram;
1683 }
1684
1685 /**
1686 * memory_region_is_ram_device: check whether a memory region is a ram device
1687 *
1688 * Returns %true if a memory region is a device backed ram region
1689 *
1690 * @mr: the memory region being queried
1691 */
1692 bool memory_region_is_ram_device(MemoryRegion *mr);
1693
1694 /**
1695 * memory_region_is_romd: check whether a memory region is in ROMD mode
1696 *
1697 * Returns %true if a memory region is a ROM device and currently set to allow
1698 * direct reads.
1699 *
1700 * @mr: the memory region being queried
1701 */
1702 static inline bool memory_region_is_romd(MemoryRegion *mr)
1703 {
1704 return mr->rom_device && mr->romd_mode;
1705 }
1706
1707 /**
1708 * memory_region_is_protected: check whether a memory region is protected
1709 *
1710 * Returns %true if a memory region is protected RAM and cannot be accessed
1711 * via standard mechanisms, e.g. DMA.
1712 *
1713 * @mr: the memory region being queried
1714 */
1715 bool memory_region_is_protected(MemoryRegion *mr);
1716
1717 /**
1718 * memory_region_get_iommu: check whether a memory region is an iommu
1719 *
1720 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1721 * otherwise NULL.
1722 *
1723 * @mr: the memory region being queried
1724 */
1725 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1726 {
1727 if (mr->alias) {
1728 return memory_region_get_iommu(mr->alias);
1729 }
1730 if (mr->is_iommu) {
1731 return (IOMMUMemoryRegion *) mr;
1732 }
1733 return NULL;
1734 }
1735
1736 /**
1737 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1738 * if an iommu or NULL if not
1739 *
1740 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1741 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
1742 *
1743 * @iommu_mr: the memory region being queried
1744 */
1745 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1746 IOMMUMemoryRegion *iommu_mr)
1747 {
1748 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1749 }
1750
1751 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1752
1753 /**
1754 * memory_region_iommu_get_min_page_size: get minimum supported page size
1755 * for an iommu
1756 *
1757 * Returns minimum supported page size for an iommu.
1758 *
1759 * @iommu_mr: the memory region being queried
1760 */
1761 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1762
1763 /**
1764 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1765 *
1766 * Note: for any IOMMU implementation, an in-place mapping change
1767 * should be notified with an UNMAP followed by a MAP.
1768 *
1769 * @iommu_mr: the memory region that was changed
1770 * @iommu_idx: the IOMMU index for the translation table which has changed
1771 * @event: TLB event with the new entry in the IOMMU translation table.
1772 * The entry replaces all old entries for the same virtual I/O address
1773 * range.
1774 */
1775 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1776 int iommu_idx,
1777 IOMMUTLBEvent event);
1778
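/*
 * Illustrative sketch (not part of this header): signalling an in-place
 * mapping change as UNMAP followed by MAP, per the note above; iova, mask
 * and new_paddr are assumptions of the example.
 *
 *     IOMMUTLBEvent event = {
 *         .type = IOMMU_NOTIFIER_UNMAP,
 *         .entry = {
 *             .target_as = &address_space_memory,
 *             .iova = iova,
 *             .addr_mask = mask,
 *             .perm = IOMMU_NONE,
 *         },
 *     };
 *
 *     memory_region_notify_iommu(iommu_mr, 0, event);
 *
 *     event.type = IOMMU_NOTIFIER_MAP;
 *     event.entry.translated_addr = new_paddr;
 *     event.entry.perm = IOMMU_RW;
 *     memory_region_notify_iommu(iommu_mr, 0, event);
 */
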
1779 /**
1780 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1781 * entry to a single notifier
1782 *
1783 * This works just like memory_region_notify_iommu(), but it only
1784 * notifies a specific notifier, not all of them.
1785 *
1786 * @notifier: the notifier to be notified
1787 * @event: TLB event with the new entry in the IOMMU translation table.
1788 * The entry replaces all old entries for the same virtual I/O address
1789 * range.
1790 */
1791 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1792 IOMMUTLBEvent *event);
1793
1794 /**
1795 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
1796 * translation that covers the
1797 * range of a notifier
1798 *
1799 * @notifier: the notifier to be notified
1800 */
1801 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
1802
1803
1804 /**
1805 * memory_region_register_iommu_notifier: register a notifier for changes to
1806 * IOMMU translation entries.
1807 *
1808 * Returns 0 on success, or a negative errno otherwise. In particular,
1809 * -EINVAL indicates that at least one of the attributes of the notifier
1810 * is not supported (flag/range) by the IOMMU memory region. In case of
1811 * error, the error object must be created and returned via @errp.
1812 *
1813 * @mr: the memory region to observe
1814 * @n: the IOMMUNotifier to be added; the notify callback receives a
1815 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1816 * ceases to be valid on exit from the notifier.
1817 * @errp: pointer to Error*, to store an error if it happens.
1818 */
1819 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1820 IOMMUNotifier *n, Error **errp);
1821
1822 /**
1823 * memory_region_iommu_replay: replay existing IOMMU translations to
1824 * a notifier with the minimum page granularity returned by
1825 * mr->iommu_ops->get_page_size().
1826 *
1827 * Note: this is not related to record-and-replay functionality.
1828 *
1829 * @iommu_mr: the memory region to observe
1830 * @n: the notifier to which to replay iommu mappings
1831 */
1832 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1833
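/*
 * Illustrative sketch (not part of this header): registering a notifier
 * covering the whole address range and replaying the existing mappings
 * into it; my_iommu_notify() is an assumption of the example.
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // entry is only valid for the duration of this call
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                               &n, errp) < 0) {
 *         return;    // flag/range not supported; @errp has been set
 *     }
 *     memory_region_iommu_replay(iommu_mr, &n);
 */
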
1834 /**
1835 * memory_region_unregister_iommu_notifier: unregister a notifier for
1836 * changes to IOMMU translation entries.
1837 *
1838 * @mr: the memory region which was observed and for which notify_stopped()
1839 * needs to be called
1840 * @n: the notifier to be removed.
1841 */
1842 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1843 IOMMUNotifier *n);
1844
1845 /**
1846 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1847 * defined on the IOMMU.
1848 *
1849 * Returns 0 on success, or a negative errno otherwise. In particular,
1850 * -EINVAL indicates that the IOMMU does not support the requested
1851 * attribute.
1852 *
1853 * @iommu_mr: the memory region
1854 * @attr: the requested attribute
1855 * @data: a pointer to the requested attribute data
1856 */
1857 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1858 enum IOMMUMemoryRegionAttr attr,
1859 void *data);
1860
1861 /**
1862 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1863 * use for translations with the given memory transaction attributes.
1864 *
1865 * @iommu_mr: the memory region
1866 * @attrs: the memory transaction attributes
1867 */
1868 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1869 MemTxAttrs attrs);
1870
1871 /**
1872 * memory_region_iommu_num_indexes: return the total number of IOMMU
1873 * indexes that this IOMMU supports.
1874 *
1875 * @iommu_mr: the memory region
1876 */
1877 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1878
1879 /**
1880 * memory_region_iommu_set_page_size_mask: set the supported page
1881 * sizes for a given IOMMU memory region
1882 *
1883 * @iommu_mr: IOMMU memory region
1884 * @page_size_mask: supported page size mask
1885 * @errp: pointer to Error*, to store an error if it happens.
1886 */
1887 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1888 uint64_t page_size_mask,
1889 Error **errp);
1890
1891 /**
1892 * memory_region_iommu_set_iova_ranges - Set the usable IOVA ranges
1893 * for a given IOMMU MR region
1894 *
1895 * @iommu: IOMMU memory region
1896 * @iova_ranges: list of ordered IOVA ranges (at least one range)
1897 * @errp: pointer to Error*, to store an error if it happens.
1898 */
1899 int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
1900 GList *iova_ranges,
1901 Error **errp);
1902
1903 /**
1904 * memory_region_name: get a memory region's name
1905 *
1906 * Returns the string that was used to initialize the memory region.
1907 *
1908 * @mr: the memory region being queried
1909 */
1910 const char *memory_region_name(const MemoryRegion *mr);
1911
1912 /**
1913 * memory_region_is_logging: return whether a memory region is logging writes
1914 *
1915 * Returns %true if the memory region is logging writes for the given client
1916 *
1917 * @mr: the memory region being queried
1918 * @client: the client being queried
1919 */
1920 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1921
1922 /**
1923 * memory_region_get_dirty_log_mask: return the clients for which a
1924 * memory region is logging writes.
1925 *
1926 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1927 * are the bit indices.
1928 *
1929 * @mr: the memory region being queried
1930 */
1931 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1932
1933 /**
1934 * memory_region_is_rom: check whether a memory region is ROM
1935 *
1936 * Returns %true if a memory region is read-only memory.
1937 *
1938 * @mr: the memory region being queried
1939 */
1940 static inline bool memory_region_is_rom(MemoryRegion *mr)
1941 {
1942 return mr->ram && mr->readonly;
1943 }
1944
1945 /**
1946 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1947 *
1948 * Returns %true if a memory region is non-volatile memory.
1949 *
1950 * @mr: the memory region being queried
1951 */
1952 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1953 {
1954 return mr->nonvolatile;
1955 }
1956
1957 /**
1958 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1959 *
1960 * Returns a file descriptor backing a file-based RAM memory region,
1961 * or -1 if the region is not a file-based RAM memory region.
1962 *
1963 * @mr: the RAM or alias memory region being queried.
1964 */
1965 int memory_region_get_fd(MemoryRegion *mr);
1966
1967 /**
1968 * memory_region_from_host: Convert a pointer into a RAM memory region
1969 * and an offset within it.
1970 *
1971 * Given a host pointer inside a RAM memory region (created with
1972 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1973 * the MemoryRegion and the offset within it.
1974 *
1975 * Use with care; by the time this function returns, the returned pointer is
1976 * not protected by RCU anymore. If the caller is not within an RCU critical
1977 * section and does not hold the iothread lock, it must have other means of
1978 * protecting the pointer, such as a reference to the region that includes
1979 * the incoming ram_addr_t.
1980 *
1981 * @ptr: the host pointer to be converted
1982 * @offset: the offset within memory region
1983 */
1984 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1985
1986 /**
1987 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1988 *
1989 * Returns a host pointer to a RAM memory region (created with
1990 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1991 *
1992 * Use with care; by the time this function returns, the returned pointer is
1993 * not protected by RCU anymore. If the caller is not within an RCU critical
1994 * section and does not hold the iothread lock, it must have other means of
1995 * protecting the pointer, such as a reference to the region that includes
1996 * the incoming ram_addr_t.
1997 *
1998 * @mr: the memory region being queried.
1999 */
2000 void *memory_region_get_ram_ptr(MemoryRegion *mr);
2001
2002 /* memory_region_ram_resize: Resize a RAM region.
2003 *
2004 * Resizing RAM while migrating can result in the migration being canceled.
2005 * Care has to be taken if the guest might have already detected the memory.
2006 *
2007 * @mr: a memory region created with @memory_region_init_resizeable_ram.
2008 * @newsize: the new size of the region
2009 * @errp: pointer to Error*, to store an error if it happens.
2010 */
2011 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
2012 Error **errp);
2013
2014 /**
2015 * memory_region_msync: Synchronize selected address range of
2016 * a memory mapped region
2017 *
2018 * @mr: the memory region to be synchronized
2019 * @addr: the initial address of the range to be synced
2020 * @size: the size of the range to be synced
2021 */
2022 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
2023
2024 /**
2025 * memory_region_writeback: Trigger cache writeback for
2026 * selected address range
2027 *
2028 * @mr: the memory region to be updated
2029 * @addr: the initial address of the range to be written back
2030 * @size: the size of the range to be written back
2031 */
2032 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
2033
2034 /**
2035 * memory_region_set_log: Turn dirty logging on or off for a region.
2036 *
2037 * Turns dirty logging on or off for a specified client (display, migration).
2038 * Only meaningful for RAM regions.
2039 *
2040 * @mr: the memory region being updated.
2041 * @log: whether dirty logging is to be enabled or disabled.
2042 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
2043 */
2044 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
2045
2046 /**
2047 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
2048 *
2049 * Marks a range of bytes as dirty, after it has been dirtied outside
2050 * guest code.
2051 *
2052 * @mr: the memory region being dirtied.
2053 * @addr: the address (relative to the start of the region) being dirtied.
2054 * @size: size of the range being dirtied.
2055 */
2056 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2057 hwaddr size);
2058
2059 /**
2060 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
2061 *
2062 * This function is called when the caller wants to clear the remote
2063 * dirty bitmap of a memory range within the memory region. This can
2064 * be used by e.g. KVM to manually clear dirty log when
2065 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
2066 * kernel.
2067 *
2068 * @mr: the memory region to clear the dirty log upon
2069 * @start: start address offset within the memory region
2070 * @len: length of the memory region to clear dirty bitmap
2071 */
2072 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2073 hwaddr len);
2074
2075 /**
2076 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
2077 * bitmap and clear it.
2078 *
2079 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
2080 * returns the snapshot. The snapshot can then be used to query dirty
2081 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
2082 * querying the same page multiple times, which is especially useful for
2083 * display updates where the scanlines often are not page aligned.
2084 *
2085 * The dirty bitmap region which gets copied into the snapshot (and
2086 * cleared afterwards) can be larger than requested. The boundaries
2087 * are rounded up/down so complete bitmap longs (covering 64 pages on
2088 * 64-bit hosts) can be copied over into the bitmap snapshot. This
2089 * isn't a problem for display updates, as the extra pages are outside
2090 * the visible area, and in case the visible area changes a full
2091 * display redraw is due anyway. Should other use cases for this
2092 * function emerge we might have to revisit this implementation
2093 * detail.
2094 *
2095 * Use g_free() to release the returned DirtyBitmapSnapshot.
2096 *
2097 * @mr: the memory region being queried.
2098 * @addr: the address (relative to the start of the region) being queried.
2099 * @size: the size of the range being queried.
2100 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
2101 */
2102 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2103 hwaddr addr,
2104 hwaddr size,
2105 unsigned client);
2106
2107 /**
2108 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
2109 * in the specified dirty bitmap snapshot.
2110 *
2111 * @mr: the memory region being queried.
2112 * @snap: the dirty bitmap snapshot
2113 * @addr: the address (relative to the start of the region) being queried.
2114 * @size: the size of the range being queried.
2115 */
2116 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2117 DirtyBitmapSnapshot *snap,
2118 hwaddr addr, hwaddr size);
2119
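/*
 * Illustrative sketch (not part of this header): a display update loop
 * driven by a dirty bitmap snapshot; fb_mr, stride and height are
 * assumptions of the example.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0,
 *                                                   stride * height,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                              y * stride, stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 */
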
2120 /**
2121 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2122 * client.
2123 *
2124 * Marks a range of pages as no longer dirty.
2125 *
2126 * @mr: the region being updated.
2127 * @addr: the start of the subrange being cleaned.
2128 * @size: the size of the subrange being cleaned.
2129 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2130 * %DIRTY_MEMORY_VGA.
2131 */
2132 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2133 hwaddr size, unsigned client);
2134
2135 /**
2136 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2137 * TBs (for self-modifying code).
2138 *
2139 * The MemoryRegionOps->write() callback of a ROM device must use this function
2140 * to mark byte ranges that have been modified internally, such as by directly
2141 * accessing the memory returned by memory_region_get_ram_ptr().
2142 *
2143 * This function marks the range dirty and invalidates TBs so that TCG can
2144 * detect self-modifying code.
2145 *
2146 * @mr: the region being flushed.
2147 * @addr: the start, relative to the start of the region, of the range being
2148 * flushed.
2149 * @size: the size, in bytes, of the range being flushed.
2150 */
2151 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
2152
2153 /**
2154 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2155 *
2156 * Allows a memory region to be marked as read-only (turning it into a ROM).
2157 * Only useful on RAM regions.
2158 *
2159 * @mr: the region being updated.
2160 * @readonly: whether the region is to be ROM or RAM.
2161 */
2162 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2163
2164 /**
2165 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2166 *
2167 * Allows a memory region to be marked as non-volatile.
2168 * Only useful on RAM regions.
2169 *
2170 * @mr: the region being updated.
2171 * @nonvolatile: whether the region is to be non-volatile.
2172 */
2173 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2174
2175 /**
2176 * memory_region_rom_device_set_romd: enable/disable ROMD mode
2177 *
2178 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
2179 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
2180 * device is mapped to guest memory and satisfies read access directly.
2181 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2182 * Writes are always handled by the #MemoryRegion.write function.
2183 *
2184 * @mr: the memory region to be updated
2185 * @romd_mode: %true to put the region into ROMD mode
2186 */
2187 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2188
2189 /**
2190 * memory_region_set_coalescing: Enable memory coalescing for the region.
2191 *
2192 * Enables writes to a region to be queued for later processing. MMIO ->write
2193 * callbacks may be delayed until a non-coalesced MMIO is issued.
2194 * Only useful for IO regions. Roughly similar to write-combining hardware.
2195 *
2196 * @mr: the memory region to be write coalesced
2197 */
2198 void memory_region_set_coalescing(MemoryRegion *mr);
2199
2200 /**
2201 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2202 * a region.
2203 *
2204 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2205 * Multiple calls can be issued to coalesce disjoint ranges.
2206 *
2207 * @mr: the memory region to be updated.
2208 * @offset: the start of the range within the region to be coalesced.
2209 * @size: the size of the subrange to be coalesced.
2210 */
2211 void memory_region_add_coalescing(MemoryRegion *mr,
2212 hwaddr offset,
2213 uint64_t size);
2214
2215 /**
2216 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2217 *
2218 * Disables any coalescing caused by memory_region_set_coalescing() or
2219 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
2220 * hardware.
2221 *
2222 * @mr: the memory region to be updated.
2223 */
2224 void memory_region_clear_coalescing(MemoryRegion *mr);
2225
2226 /**
2227 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2228 * accesses.
2229 *
2230 * Ensure that pending coalesced MMIO requests are flushed before the memory
2231 * region is accessed. This property is automatically enabled for all regions
2232 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2233 *
2234 * @mr: the memory region to be updated.
2235 */
2236 void memory_region_set_flush_coalesced(MemoryRegion *mr);
2237
2238 /**
2239 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2240 * accesses.
2241 *
2242 * Clear the automatic coalesced MMIO flushing enabled via
2243 * memory_region_set_flush_coalesced. Note that this service has no effect on
2244 * memory regions that have MMIO coalescing enabled for themselves. For them,
2245 * automatic flushing will stop once coalescing is disabled.
2246 *
2247 * @mr: the memory region to be updated.
2248 */
2249 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2250
2251 /**
2252 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2253 * is written to a location.
2254 *
2255 * Marks a word in an IO region (initialized with memory_region_init_io())
2256 * as a trigger for an eventfd event. The I/O callback will not be called.
2257 * The caller must be prepared to handle failure (that is, take the required
2258 * action if the callback _is_ called).
2259 *
2260 * @mr: the memory region being updated.
2261 * @addr: the address within @mr that is to be monitored
2262 * @size: the size of the access to trigger the eventfd
2263 * @match_data: whether to match against @data, instead of just @addr
2264 * @data: the data to match against the guest write
2265 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2266 */
2267 void memory_region_add_eventfd(MemoryRegion *mr,
2268 hwaddr addr,
2269 unsigned size,
2270 bool match_data,
2271 uint64_t data,
2272 EventNotifier *e);
2273
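/*
 * Illustrative sketch (not part of this header): wiring a doorbell register
 * to an eventfd so that a 4-byte guest write of DOORBELL_MAGIC at offset
 * 0x40 kicks an EventNotifier without invoking the I/O callback; the offset
 * and DOORBELL_MAGIC are assumptions of the example.
 *
 *     EventNotifier e;
 *
 *     event_notifier_init(&e, 0);
 *     memory_region_add_eventfd(mr, 0x40, 4, true, DOORBELL_MAGIC, &e);
 *
 *     // teardown must pass the same arguments:
 *     memory_region_del_eventfd(mr, 0x40, 4, true, DOORBELL_MAGIC, &e);
 *     event_notifier_cleanup(&e);
 */
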
2274 /**
2275 * memory_region_del_eventfd: Cancel an eventfd.
2276 *
2277 * Cancels an eventfd trigger requested by a previous
2278 * memory_region_add_eventfd() call.
2279 *
2280 * @mr: the memory region being updated.
2281 * @addr: the address within @mr that is to be monitored
2282 * @size: the size of the access to trigger the eventfd
2283 * @match_data: whether to match against @data, instead of just @addr
2284 * @data: the data to match against the guest write
2285 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2286 */
2287 void memory_region_del_eventfd(MemoryRegion *mr,
2288 hwaddr addr,
2289 unsigned size,
2290 bool match_data,
2291 uint64_t data,
2292 EventNotifier *e);
2293
2294 /**
2295 * memory_region_add_subregion: Add a subregion to a container.
2296 *
2297 * Adds a subregion at @offset. The subregion may not overlap with other
2298 * subregions (except for those explicitly marked as overlapping). A region
2299 * may only be added once as a subregion (unless removed with
2300 * memory_region_del_subregion()); use memory_region_init_alias() if you
2301 * want a region to be a subregion in multiple locations.
2302 *
2303 * @mr: the region to contain the new subregion; must be a container
2304 * initialized with memory_region_init().
2305 * @offset: the offset relative to @mr where @subregion is added.
2306 * @subregion: the subregion to be added.
2307 */
2308 void memory_region_add_subregion(MemoryRegion *mr,
2309 hwaddr offset,
2310 MemoryRegion *subregion);
2311 /**
2312 * memory_region_add_subregion_overlap: Add a subregion to a container
2313 * with overlap.
2314 *
2315 * Adds a subregion at @offset. The subregion may overlap with other
2316 * subregions. Conflicts are resolved by having a higher @priority hide a
2317 * lower @priority. Subregions without priority are taken as @priority 0.
2318 * A region may only be added once as a subregion (unless removed with
2319 * memory_region_del_subregion()); use memory_region_init_alias() if you
2320 * want a region to be a subregion in multiple locations.
2321 *
2322 * @mr: the region to contain the new subregion; must be a container
2323 * initialized with memory_region_init().
2324 * @offset: the offset relative to @mr where @subregion is added.
2325 * @subregion: the subregion to be added.
2326 * @priority: used for resolving overlaps; highest priority wins.
2327 */
2328 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2329 hwaddr offset,
2330 MemoryRegion *subregion,
2331 int priority);
2332
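/*
 * Illustrative sketch (not part of this header): a container holding RAM
 * plus a higher-priority MMIO window that hides the RAM underneath it;
 * the field names and sizes are assumptions of the example.
 *
 *     memory_region_init(&s->container, OBJECT(s), "bus", 0x100000);
 *     memory_region_add_subregion(&s->container, 0x0, &s->ram);
 *
 *     // priority 1 beats the RAM's default priority 0 where they overlap
 *     memory_region_add_subregion_overlap(&s->container, 0x8000,
 *                                         &s->mmio, 1);
 */
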
2333 /**
2334 * memory_region_get_ram_addr: Get the ram address associated with a memory
2335 * region
2336 *
2337 * @mr: the region to be queried
2338 */
2339 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2340
2341 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2342 /**
2343 * memory_region_del_subregion: Remove a subregion.
2344 *
2345 * Removes a subregion from its container.
2346 *
2347 * @mr: the container to be updated.
2348 * @subregion: the region being removed; must be a current subregion of @mr.
2349 */
2350 void memory_region_del_subregion(MemoryRegion *mr,
2351 MemoryRegion *subregion);
2352
2353 /*
2354 * memory_region_set_enabled: dynamically enable or disable a region
2355 *
2356 * Enables or disables a memory region. A disabled memory region
2357 * ignores all accesses to itself and its subregions. It does not
2358 * obscure sibling subregions with lower priority - it simply behaves as
2359 * if it were removed from the hierarchy.
2360 *
2361 * Regions default to being enabled.
2362 *
2363 * @mr: the region to be updated
2364 * @enabled: whether to enable or disable the region
2365 */
2366 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2367
2368 /*
2369 * memory_region_set_address: dynamically update the address of a region
2370 *
2371 * Dynamically updates the address of a region, relative to its container.
2372 * May be used on regions that are currently part of a memory hierarchy.
2373 *
2374 * @mr: the region to be updated
2375 * @addr: new address, relative to container region
2376 */
2377 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2378
2379 /*
2380 * memory_region_set_size: dynamically update the size of a region.
2381 *
2382 * Dynamically updates the size of a region.
2383 *
2384 * @mr: the region to be updated
2385 * @size: used size of the region.
2386 */
2387 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2388
2389 /*
2390 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2391 *
2392 * Dynamically updates the offset into the target region that an alias points
2393 * to, as if the fourth argument to memory_region_init_alias() had changed.
2394 *
2395 * @mr: the #MemoryRegion to be updated; should be an alias.
2396 * @offset: the new offset into the target memory region
2397 */
2398 void memory_region_set_alias_offset(MemoryRegion *mr,
2399 hwaddr offset);
2400
2401 /*
2402 * memory_region_set_unmergeable: Set a memory region unmergeable
2403 *
2404 * Mark a memory region unmergeable, resulting in the memory region (or
2405 * everything contained in a memory region container) not getting merged when
2406 * simplifying the address space and notifying memory listeners. Consequently,
2407 * memory listeners will never get notified about ranges that are larger than
2408 * the original memory regions.
2409 *
2410 * This is primarily useful when multiple aliases to a RAM memory region are
2411 * mapped into a memory region container, and updates (e.g., enable/disable or
2412 * map/unmap) of individual memory region aliases are not supposed to affect
2413 * other memory regions in the same container.
2414 *
2415 * @mr: the #MemoryRegion to be updated
2416 * @unmergeable: whether to mark the #MemoryRegion unmergeable
2417 */
2418 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
2419
2420 /**
2421 * memory_region_present: checks if an address relative to a @container
2422 * translates into a #MemoryRegion within @container
2423 *
2424 * Answer whether a #MemoryRegion within @container covers the address
2425 * @addr.
2426 *
2427 * @container: a #MemoryRegion within which @addr is a relative address
2428 * @addr: the area within @container to be searched
2429 */
2430 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2431
2432 /**
2433 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2434 * into another memory region, which does not necessarily imply that it is
2435 * mapped into an address space.
2436 *
2437 * @mr: a #MemoryRegion which should be checked if it's mapped
2438 */
2439 bool memory_region_is_mapped(MemoryRegion *mr);
2440
2441 /**
2442 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2443 * #MemoryRegion
2444 *
2445 * The #RamDiscardManager cannot change while a memory region is mapped.
2446 *
2447 * @mr: the #MemoryRegion
2448 */
2449 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2450
2451 /**
2452 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2453 * #RamDiscardManager assigned
2454 *
2455 * @mr: the #MemoryRegion
2456 */
2457 static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2458 {
2459 return !!memory_region_get_ram_discard_manager(mr);
2460 }
2461
2462 /**
2463 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2464 * #MemoryRegion
2465 *
2466 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2467 * that does not cover RAM, or a #MemoryRegion that already has a
2468 * #RamDiscardManager assigned.
2469 *
2470 * @mr: the #MemoryRegion
2471 * @rdm: #RamDiscardManager to set
2472 */
2473 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2474 RamDiscardManager *rdm);
2475
2476 /**
2477 * memory_region_find: translate an address/size relative to a
2478 * MemoryRegion into a #MemoryRegionSection.
2479 *
2480 * Locates the first #MemoryRegion within @mr that overlaps the range
2481 * given by @addr and @size.
2482 *
2483 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2484 * It will have the following characteristics:
2485 * - @size = 0 iff no overlap was found
2486 * - @mr is non-%NULL iff an overlap was found
2487 *
2488 * Remember that in the return value the @offset_within_region is
2489 * relative to the returned region (in the .@mr field), not to the
2490 * @mr argument.
2491 *
2492 * Similarly, the .@offset_within_address_space is relative to the
2493 * address space that contains both regions, the passed and the
2494 * returned one. However, in the special case where the @mr argument
2495 * has no container (and thus is the root of the address space), the
2496 * following will hold:
2497 * - @offset_within_address_space >= @addr
2498 * - @offset_within_address_space + .@size <= @addr + @size
2499 *
2500 * @mr: a MemoryRegion within which @addr is a relative address
2501 * @addr: start of the area within @mr to be searched
2502 * @size: size of the area to be searched
2503 */
2504 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2505 hwaddr addr, uint64_t size);
2506
2507 /**
2508 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2509 *
2510 * Synchronizes the dirty page log for all address spaces.
2511 *
2512 * @last_stage: whether this is the last stage of live migration
2513 */
2514 void memory_global_dirty_log_sync(bool last_stage);
2515
2516 /**
2517 * memory_global_after_dirty_log_sync: synchronize vCPUs after a dirty log sync
2518 *
2519 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2520 * This function must be called after the dirty log bitmap is cleared, and
2521 * before dirty guest memory pages are read. If you are using
2522 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2523 * care of doing this.
2524 */
2525 void memory_global_after_dirty_log_sync(void);
2526
2527 /**
2528 * memory_region_transaction_begin: Start a transaction.
2529 *
2530 * During a transaction, changes will be accumulated and made visible
2531 * only when the transaction ends (is committed).
2532 */
2533 void memory_region_transaction_begin(void);
2534
2535 /**
2536 * memory_region_transaction_commit: Commit a transaction and make changes
2537 * visible to the guest.
2538 */
2539 void memory_region_transaction_commit(void);
2540
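/*
 * Illustrative sketch (not part of this header): batching two topology
 * updates so that listeners (and the guest) observe one atomic change;
 * bar_mr and new_base are assumptions of the example.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_set_enabled(&s->bar_mr, true);
 *     memory_region_transaction_commit();
 */
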
2541 /**
2542 * memory_listener_register: register callbacks to be called when memory
2543 * sections are mapped into or unmapped from an
2544 * address space
2545 *
2546 * @listener: an object containing the callbacks to be called
2547 * @filter: if non-%NULL, only regions in this address space will be observed
2548 */
2549 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2550
2551 /**
2552 * memory_listener_unregister: undo the effect of memory_listener_register()
2553 *
2554 * @listener: an object containing the callbacks to be removed
2555 */
2556 void memory_listener_unregister(MemoryListener *listener);
2557
2558 /**
2559 * memory_global_dirty_log_start: begin dirty logging for all regions
2560 *
2561 * @flags: purpose of starting dirty log, migration or dirty rate
2562 */
2563 void memory_global_dirty_log_start(unsigned int flags);
2564
2565 /**
2566 * memory_global_dirty_log_stop: end dirty logging for all regions
2567 *
2568 * @flags: purpose of stopping dirty log, migration or dirty rate
2569 */
2570 void memory_global_dirty_log_stop(unsigned int flags);
2571
2572 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2573
2574 bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
2575 unsigned size, bool is_write,
2576 MemTxAttrs attrs);
2577
2578 /**
2579 * memory_region_dispatch_read: perform a read directly to the specified
2580 * MemoryRegion.
2581 *
2582 * @mr: #MemoryRegion to access
2583 * @addr: address within that region
2584 * @pval: pointer to uint64_t which the data is written to
2585 * @op: size, sign, and endianness of the memory operation
2586 * @attrs: memory transaction attributes to use for the access
2587 */
2588 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2589 hwaddr addr,
2590 uint64_t *pval,
2591 MemOp op,
2592 MemTxAttrs attrs);
2593 /**
2594 * memory_region_dispatch_write: perform a write directly to the specified
2595 * MemoryRegion.
2596 *
2597 * @mr: #MemoryRegion to access
2598 * @addr: address within that region
2599 * @data: data to write
2600 * @op: size, sign, and endianness of the memory operation
2601 * @attrs: memory transaction attributes to use for the access
2602 */
2603 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2604 hwaddr addr,
2605 uint64_t data,
2606 MemOp op,
2607 MemTxAttrs attrs);
2608
2609 /**
2610 * address_space_init: initializes an address space
2611 *
2612 * @as: an uninitialized #AddressSpace
2613 * @root: a #MemoryRegion that routes addresses for the address space
2614 * @name: an address space name. The name is only used for debugging
2615 * output.
2616 */
2617 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2618
2619 /**
2620 * address_space_destroy: destroy an address space
2621 *
2622 * Releases all resources associated with an address space. After an address space
2623 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2624 * as well.
2625 *
2626 * @as: address space to be destroyed
2627 */
2628 void address_space_destroy(AddressSpace *as);
2629
2630 /**
2631 * address_space_remove_listeners: unregister all listeners of an address space
2632 *
2633 * Removes all callbacks previously registered with memory_listener_register()
2634 * for @as.
2635 *
2636 * @as: an initialized #AddressSpace
2637 */
2638 void address_space_remove_listeners(AddressSpace *as);
2639
2640 /**
2641 * address_space_rw: read from or write to an address space.
2642 *
2643 * Return a MemTxResult indicating whether the operation succeeded
2644 * or failed (eg unassigned memory, device rejected the transaction,
2645 * IOMMU fault).
2646 *
2647 * @as: #AddressSpace to be accessed
2648 * @addr: address within that address space
2649 * @attrs: memory transaction attributes
2650 * @buf: buffer with the data transferred
2651 * @len: the number of bytes to read or write
2652 * @is_write: indicates the transfer direction
2653 */
2654 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2655 MemTxAttrs attrs, void *buf,
2656 hwaddr len, bool is_write);
2657
2658 /**
2659 * address_space_write: write to address space.
2660 *
2661 * Return a MemTxResult indicating whether the operation succeeded
2662 * or failed (eg unassigned memory, device rejected the transaction,
2663 * IOMMU fault).
2664 *
2665 * @as: #AddressSpace to be accessed
2666 * @addr: address within that address space
2667 * @attrs: memory transaction attributes
2668 * @buf: buffer with the data transferred
2669 * @len: the number of bytes to write
2670 */
2671 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2672 MemTxAttrs attrs,
2673 const void *buf, hwaddr len);
2674
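/*
 * Illustrative sketch (not part of this header): a device DMA-ing a
 * little-endian status word into guest memory and checking the result;
 * desc_addr and status are assumptions of the example.
 *
 *     uint32_t val = cpu_to_le32(status);
 *     MemTxResult r;
 *
 *     r = address_space_write(&address_space_memory, desc_addr,
 *                             MEMTXATTRS_UNSPECIFIED, &val, sizeof(val));
 *     if (r != MEMTX_OK) {
 *         // report a DMA failure
 *     }
 */
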
2675 /**
2676 * address_space_write_rom: write to address space, including ROM.
2677 *
2678 * This function writes to the specified address space, but will
2679 * write data to both ROM and RAM. This is used for non-guest
2680 * writes like writes from the gdb debug stub or initial loading
2681 * of ROM contents.
2682 *
2683 * Note that portions of the write which attempt to write data to
2684 * a device will be silently ignored -- only real RAM and ROM will
2685 * be written to.
2686 *
2687 * Return a MemTxResult indicating whether the operation succeeded
2688 * or failed (eg unassigned memory, device rejected the transaction,
2689 * IOMMU fault).
2690 *
2691 * @as: #AddressSpace to be accessed
2692 * @addr: address within that address space
2693 * @attrs: memory transaction attributes
2694 * @buf: buffer with the data transferred
2695 * @len: the number of bytes to write
2696 */
2697 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2698 MemTxAttrs attrs,
2699 const void *buf, hwaddr len);
2700
2701 /* address_space_ld*: load from an address space
2702 * address_space_st*: store to an address space
2703 *
2704 * These functions perform a load or store of the byte, word,
2705 * longword or quad to the specified address within the AddressSpace.
2706 * The _le suffixed functions treat the data as little endian;
2707 * _be indicates big endian; no suffix indicates "same endianness
2708 * as guest CPU".
2709 *
2710 * The "guest CPU endianness" accessors are deprecated for use outside
2711 * target-* code; devices should be CPU-agnostic and use either the LE
2712 * or the BE accessors.
2713 *
2714 * @as: #AddressSpace to be accessed
2715 * @addr: address within that address space
2716 * @val: data value, for stores
2717 * @attrs: memory transaction attributes
2718 * @result: location to write the success/failure of the transaction;
2719 * if NULL, this information is discarded
2720 */
2721
2722 #define SUFFIX
2723 #define ARG1 as
2724 #define ARG1_DECL AddressSpace *as
2725 #include "exec/memory_ldst.h.inc"
2726
2727 #define SUFFIX
2728 #define ARG1 as
2729 #define ARG1_DECL AddressSpace *as
2730 #include "exec/memory_ldst_phys.h.inc"
2731
2732 struct MemoryRegionCache {
2733 void *ptr;
2734 hwaddr xlat;
2735 hwaddr len;
2736 FlatView *fv;
2737 MemoryRegionSection mrs;
2738 bool is_write;
2739 };
2740
2741 /* address_space_ld*_cached: load from a cached #MemoryRegion
2742 * address_space_st*_cached: store into a cached #MemoryRegion
2743 *
2744 * These functions perform a load or store of the byte, word,
2745 * longword or quad to the specified address. The address is
2746 * a physical address in the AddressSpace, but it must lie within
2747 * a #MemoryRegion that was mapped with address_space_cache_init.
2748 *
2749 * The _le suffixed functions treat the data as little endian;
2750 * _be indicates big endian; no suffix indicates "same endianness
2751 * as guest CPU".
2752 *
2753 * The "guest CPU endianness" accessors are deprecated for use outside
2754 * target-* code; devices should be CPU-agnostic and use either the LE
2755 * or the BE accessors.
2756 *
2757 * @cache: previously initialized #MemoryRegionCache to be accessed
2758 * @addr: address within the address space
2759 * @val: data value, for stores
2760 * @attrs: memory transaction attributes
2761 * @result: location to write the success/failure of the transaction;
2762 * if NULL, this information is discarded
2763 */
2764
2765 #define SUFFIX _cached_slow
2766 #define ARG1 cache
2767 #define ARG1_DECL MemoryRegionCache *cache
2768 #include "exec/memory_ldst.h.inc"
2769
2770 /* Inline fast path for direct RAM access. */
2771 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2772 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2773 {
2774 assert(addr < cache->len);
2775 if (likely(cache->ptr)) {
2776 return ldub_p(cache->ptr + addr);
2777 } else {
2778 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2779 }
2780 }
2781
2782 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2783 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
2784 {
2785 assert(addr < cache->len);
2786 if (likely(cache->ptr)) {
2787 stb_p(cache->ptr + addr, val);
2788 } else {
2789 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2790 }
2791 }
2792
2793 #define ENDIANNESS _le
2794 #include "exec/memory_ldst_cached.h.inc"
2795
2796 #define ENDIANNESS _be
2797 #include "exec/memory_ldst_cached.h.inc"
2798
2799 #define SUFFIX _cached
2800 #define ARG1 cache
2801 #define ARG1_DECL MemoryRegionCache *cache
2802 #include "exec/memory_ldst_phys.h.inc"
2803
2804 /* address_space_cache_init: prepare for repeated access to a physical
2805 * memory region
2806 *
2807 * @cache: #MemoryRegionCache to be filled
2808 * @as: #AddressSpace to be accessed
2809 * @addr: address within that address space
2810 * @len: length of buffer
2811 * @is_write: indicates the transfer direction
2812 *
2813 * Will only work with RAM, and may map a subset of the requested range by
2814 * returning a value that is less than @len. On failure, returns a negative
2815 * errno value.
2816 *
2817 * Because it only works with RAM, this function can be used for
2818 * read-modify-write operations. In this case, is_write should be %true.
2819 *
2820 * Note that addresses passed to the address_space_*_cached functions
2821 * are relative to @addr.
2822 */
2823 int64_t address_space_cache_init(MemoryRegionCache *cache,
2824 AddressSpace *as,
2825 hwaddr addr,
2826 hwaddr len,
2827 bool is_write);
2828
2829 /**
2830 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
2831 *
2832 * @cache: The #MemoryRegionCache to operate on.
2833 *
2834 * Initializes a #MemoryRegionCache structure without a memory region attached.
2835 * A cache initialized this way can only be safely destroyed, not used.
2836 */
2837 static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
2838 {
2839 cache->mrs.mr = NULL;
2840 /* There is no real need to initialize fv, but it makes Coverity happy. */
2841 cache->fv = NULL;
2842 }
2843
2844 /**
2845 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2846 *
2847 * @cache: The #MemoryRegionCache to operate on.
2848 * @addr: The first physical address that was written, relative to the
2849 * address that was passed to @address_space_cache_init.
2850 * @access_len: The number of bytes that were written starting at @addr.
2851 */
2852 void address_space_cache_invalidate(MemoryRegionCache *cache,
2853 hwaddr addr,
2854 hwaddr access_len);
2855
2856 /**
2857 * address_space_cache_destroy: free a #MemoryRegionCache
2858 *
2859 * @cache: The #MemoryRegionCache whose memory should be released.
2860 */
2861 void address_space_cache_destroy(MemoryRegionCache *cache);
2862
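/*
 * Illustrative sketch (not part of this header): the full lifecycle of a
 * #MemoryRegionCache around a read-modify-write of a 16-bit field;
 * ring_pa and DESC_SIZE are assumptions of the example.
 *
 *     MemoryRegionCache cache;
 *     uint16_t flags;
 *
 *     if (address_space_cache_init(&cache, &address_space_memory,
 *                                  ring_pa, DESC_SIZE, true) < 0) {
 *         return;    // not RAM, or the mapping failed
 *     }
 *     flags = address_space_lduw_le_cached(&cache, 0,
 *                                          MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stw_le_cached(&cache, 0, flags | 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 0, sizeof(flags));
 *     address_space_cache_destroy(&cache);
 */
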
2863 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2864 * entry. Should be called from an RCU critical section.
2865 */
2866 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2867 bool is_write, MemTxAttrs attrs);
2868
2869 /* address_space_translate: translate an address range within an address
2870 * space into a MemoryRegion and an address range within that region. Should be
2871 * called from an RCU critical section, to avoid that the last reference
2872 * to the returned region disappears after address_space_translate returns.
2873 *
2874 * @fv: #FlatView to be accessed
2875 * @addr: address within that address space
2876 * @xlat: pointer to address within the returned memory region section's
2877 * #MemoryRegion.
2878 * @len: pointer to length
2879 * @is_write: indicates the transfer direction
2880 * @attrs: memory attributes
2881 */
2882 MemoryRegion *flatview_translate(FlatView *fv,
2883 hwaddr addr, hwaddr *xlat,
2884 hwaddr *len, bool is_write,
2885 MemTxAttrs attrs);
2886
2887 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2888 hwaddr addr, hwaddr *xlat,
2889 hwaddr *len, bool is_write,
2890 MemTxAttrs attrs)
2891 {
2892 return flatview_translate(address_space_to_flatview(as),
2893 addr, xlat, len, is_write, attrs);
2894 }
2895
2896 /* address_space_access_valid: check for validity of accessing an address
2897 * space range
2898 *
2899 * Check whether memory is assigned to the given address space range, and
2900 * access is permitted by any IOMMU regions that are active for the address
2901 * space.
2902 *
2903 * For now, addr and len should be aligned to a page size. This limitation
2904 * will be lifted in the future.
2905 *
2906 * @as: #AddressSpace to be accessed
2907 * @addr: address within that address space
2908 * @len: length of the area to be checked
2909 * @is_write: indicates the transfer direction
2910 * @attrs: memory attributes
2911 */
2912 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2913 bool is_write, MemTxAttrs attrs);
2914
2915 /* address_space_map: map a physical memory region into a host virtual address
2916 *
2917 * May map a subset of the requested range, given by and returned in @plen.
2918 * May return %NULL and set *@plen to zero if resources needed to perform
2919 * the mapping are exhausted.
2920 * Use only for reads OR writes - not for read-modify-write operations.
2921 * Use cpu_register_map_client() to know when retrying the map operation is
2922 * likely to succeed.
2923 *
2924 * @as: #AddressSpace to be accessed
2925 * @addr: address within that address space
2926 * @plen: pointer to length of buffer; updated on return
2927 * @is_write: indicates the transfer direction
2928 * @attrs: memory attributes
2929 */
2930 void *address_space_map(AddressSpace *as, hwaddr addr,
2931 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2932
2933 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2934 *
2935 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2936 * the amount of memory that was actually read or written by the caller.
2937 *
2938 * @as: #AddressSpace used
2939 * @buffer: host pointer as returned by address_space_map()
2940 * @len: buffer length as returned by address_space_map()
2941 * @access_len: amount of data actually transferred
2942 * @is_write: indicates the transfer direction
2943 */
2944 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2945 bool is_write, hwaddr access_len);
2946
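/*
 * Illustrative sketch (not part of this header): reading a guest buffer
 * through a direct mapping; gpa and want are assumptions of the example.
 *
 *     hwaddr len = want;
 *     void *p = address_space_map(&address_space_memory, gpa, &len,
 *                                 false, MEMTXATTRS_UNSPECIFIED);
 *
 *     if (!p) {
 *         return;    // resources exhausted; retry later
 *     }
 *     // consume up to len bytes at p (len may be smaller than want)
 *     address_space_unmap(&address_space_memory, p, len, false, len);
 */
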
2947
2948 /* Internal functions, part of the implementation of address_space_read. */
2949 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2950 MemTxAttrs attrs, void *buf, hwaddr len);
2951 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2952 MemTxAttrs attrs, void *buf,
2953 hwaddr len, hwaddr addr1, hwaddr l,
2954 MemoryRegion *mr);
2955 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2956
2957 /* Internal functions, part of the implementation of address_space_read_cached
2958 * and address_space_write_cached. */
2959 MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2960 hwaddr addr, void *buf, hwaddr len);
2961 MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
2962 hwaddr addr, const void *buf,
2963 hwaddr len);
2964
2965 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
2966 bool prepare_mmio_access(MemoryRegion *mr);
2967
2968 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2969 {
2970 if (is_write) {
2971 return memory_region_is_ram(mr) && !mr->readonly &&
2972 !mr->rom_device && !memory_region_is_ram_device(mr);
2973 } else {
2974 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2975 memory_region_is_romd(mr);
2976 }
2977 }
2978
2979 /**
2980 * address_space_read: read from an address space.
2981 *
2982 * Return a MemTxResult indicating whether the operation succeeded
2983 * or failed (eg unassigned memory, device rejected the transaction,
2984 * IOMMU fault). Called within RCU critical section.
2985 *
2986 * @as: #AddressSpace to be accessed
2987 * @addr: address within that address space
2988 * @attrs: memory transaction attributes
2989 * @buf: buffer with the data transferred
2990 * @len: length of the data transferred
2991 */
2992 static inline __attribute__((__always_inline__))
2993 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2994 MemTxAttrs attrs, void *buf,
2995 hwaddr len)
2996 {
2997 MemTxResult result = MEMTX_OK;
2998 hwaddr l, addr1;
2999 void *ptr;
3000 MemoryRegion *mr;
3001 FlatView *fv;
3002
3003 if (__builtin_constant_p(len)) {
3004 if (len) {
3005 RCU_READ_LOCK_GUARD();
3006 fv = address_space_to_flatview(as);
3007 l = len;
3008 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
3009 if (len == l && memory_access_is_direct(mr, false)) {
3010 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3011 memcpy(buf, ptr, len);
3012 } else {
3013 result = flatview_read_continue(fv, addr, attrs, buf, len,
3014 addr1, l, mr);
3015 }
3016 }
3017 } else {
3018 result = address_space_read_full(as, addr, attrs, buf, len);
3019 }
3020 return result;
3021 }
3022
3023 /**
3024 * address_space_read_cached: read from a cached RAM region
3025 *
3026 * @cache: Cached region to be addressed
3027 * @addr: address relative to the base of the RAM region
3028 * @buf: buffer with the data transferred
3029 * @len: length of the data transferred
3030 */
3031 static inline MemTxResult
3032 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
3033 void *buf, hwaddr len)
3034 {
3035 assert(addr < cache->len && len <= cache->len - addr);
3036 fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
3037 if (likely(cache->ptr)) {
3038 memcpy(buf, cache->ptr + addr, len);
3039 return MEMTX_OK;
3040 } else {
3041 return address_space_read_cached_slow(cache, addr, buf, len);
3042 }
3043 }
3044
3045 /**
3046 * address_space_write_cached: write to a cached RAM region
3047 *
3048 * @cache: Cached region to be addressed
3049 * @addr: address relative to the base of the RAM region
3050 * @buf: buffer with the data transferred
3051 * @len: length of the data transferred
3052 */
3053 static inline MemTxResult
3054 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
3055 const void *buf, hwaddr len)
3056 {
3057 assert(addr < cache->len && len <= cache->len - addr);
3058 if (likely(cache->ptr)) {
3059 memcpy(cache->ptr + addr, buf, len);
3060 return MEMTX_OK;
3061 } else {
3062 return address_space_write_cached_slow(cache, addr, buf, len);
3063 }
3064 }
3065
3066 /**
3067 * address_space_set: Fill address space with a constant byte.
3068 *
3069 * Return a MemTxResult indicating whether the operation succeeded
3070 * or failed (eg unassigned memory, device rejected the transaction,
3071 * IOMMU fault).
3072 *
3073 * @as: #AddressSpace to be accessed
3074 * @addr: address within that address space
3075 * @c: constant byte to fill the memory
3076 * @len: the number of bytes to fill with the constant byte
3077 * @attrs: memory transaction attributes
3078 */
3079 MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
3080 uint8_t c, hwaddr len, MemTxAttrs attrs);
3081
3082 #ifdef NEED_CPU_H
3083 /* enum device_endian to MemOp. */
3084 static inline MemOp devend_memop(enum device_endian end)
3085 {
3086 QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
3087 DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
3088
3089 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
3090 /* Swap if non-host endianness or native (target) endianness */
3091 return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
3092 #else
3093 const int non_host_endianness =
3094 DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
3095
3096 /* In this case, native (target) endianness needs no swap. */
3097 return (end == non_host_endianness) ? MO_BSWAP : 0;
3098 #endif
3099 }
3100 #endif
3101
3102 /*
3103 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
3104 * to manage the actual amount of memory consumed by the VM (then, the memory
3105 * provided by RAM blocks might be bigger than the desired memory consumption).
3106 * This *must* be set if:
3107 * - Discarding parts of a RAM block does not result in the change being
3108 * reflected in the VM and the pages getting freed.
3109 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
3110 * discards blindly.
3111 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
3112 * encrypted VMs).
3113 * Technologies that only temporarily pin the current working set of a
3114 * driver are fine, because we don't expect such pages to be discarded
3115 * (esp. based on guest action like balloon inflation).
3116 *
3117 * This is *not* to be used to protect from concurrent discards (esp.,
3118 * postcopy).
3119 *
3120 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
3121 * discards to work reliably is active.
3122 */
3123 int ram_block_discard_disable(bool state);
3124
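/*
 * Illustrative sketch (not part of this header): a device that pins all
 * guest RAM long-term disabling discards for its lifetime.
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_setg(errp, "RAM discarding is in use (e.g. virtio-balloon)");
 *         return;
 *     }
 *     // ... pin guest memory and operate ...
 *
 *     ram_block_discard_disable(false);    // e.g. at unrealize time
 */
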
3125 /*
3126 * See ram_block_discard_disable(): only disable uncoordinated discards,
3127 * keeping coordinated discards (via the RamDiscardManager) enabled.
3128 */
3129 int ram_block_uncoordinated_discard_disable(bool state);
3130
3131 /*
3132 * Inhibit technologies that disable discarding of pages in RAM blocks.
3133 *
3134 * Returns 0 if successful. Returns -EBUSY if discarding of pages in RAM
3135 * blocks has already been disabled.
3136 */
3137 int ram_block_discard_require(bool state);
3138
3139 /*
3140 * See ram_block_discard_require(): only inhibit technologies that disable
3141 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
3142 * technologies that only inhibit uncoordinated discards (via the
3143 * RamDiscardManager).
3144 */
3145 int ram_block_coordinated_discard_require(bool state);
3146
3147 /*
3148 * Test if any discarding of memory in ram blocks is disabled.
3149 */
3150 bool ram_block_discard_is_disabled(void);
3151
3152 /*
3153 * Test if any discarding of memory in ram blocks is required to work reliably.
3154 */
3155 bool ram_block_discard_is_required(void);
3156
3157 #endif
3158
3159 #endif