/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    hwaddr low;
    hwaddr high;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronizations of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifies (which
 *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Regarding accurate synchronization, it's when the notified
 *       device maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       events (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronizations of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifies.
 *
 *       It's when the device maintains a cache of IOMMU translations
 *       (IOTLB) and is able to fill that cache by requesting translations
 *       from the vIOMMU through a protocol similar to ATS (Address
 *       Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped. The IOMMU notifiee
 *       should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}

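/*
 * Illustrative usage sketch (not part of the original header): a device
 * that shadows vIOMMU mappings might initialize a notifier covering the
 * whole address range. The callback name is hypothetical;
 * memory_region_register_iommu_notifier() is declared elsewhere in this
 * header:
 *
 *     static void mydev_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // react to the MAP/UNMAP event described by entry
 *     }
 *
 *     IOMMUNotifier notifier;
 *     iommu_notifier_init(&notifier, mydev_iommu_notify,
 *                         IOMMU_NOTIFIER_IOTLB_EVENTS,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                           &notifier, &error_fatal);
 */
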
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};

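/*
 * Illustrative sketch (not part of the original header): a minimal
 * MemoryRegionOps for a hypothetical device with a single 4-byte MMIO
 * register; MyDevState and all function names are made up:
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->reg;          // same value regardless of addr in this toy
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 */
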
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
};

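/*
 * Illustrative sketch (not part of the original header): the overall shape
 * of a @translate implementation. The page-table walk is elided and the
 * function name is hypothetical:
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~0xfffULL,
 *             .translated_addr = 0,    // filled in from the page tables
 *             .addr_mask = 0xfff,      // 4k translation
 *             .perm = IOMMU_NONE,      // e.g. IOMMU_RW once a mapping is found
 *         };
 *         // ... walk the guest page tables, honouring @flag ...
 *         return entry;
 *     }
 */
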
typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}

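/*
 * Illustrative sketch (not part of the original header): a listener that
 * maps populated parts and unmaps discarded parts; all names below are
 * hypothetical:
 *
 *     static int my_rdl_populate(RamDiscardListener *rdl,
 *                                MemoryRegionSection *section)
 *     {
 *         // e.g. pin/map the pages backing @section; return 0 on success
 *         return 0;
 *     }
 *
 *     static void my_rdl_discard(RamDiscardListener *rdl,
 *                                MemoryRegionSection *section)
 *     {
 *         // e.g. unpin/unmap the pages backing @section
 *     }
 *
 *     RamDiscardListener rdl;
 *     ram_discard_listener_init(&rdl, my_rdl_populate, my_rdl_discard, false);
 */
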
typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
 * mapped.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                           MemoryRegionSection *section,
                                           ReplayRamDiscard replay_fn,
                                           void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

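/*
 * Illustrative sketch (not part of the original header), continuing the
 * hypothetical listener from the ram_discard_listener_init() example;
 * memory_region_get_ram_discard_manager() is declared elsewhere in this
 * header, and @section is assumed to describe the RAM range of interest:
 *
 *     RamDiscardManager *rdm;
 *
 *     rdm = memory_region_get_ram_discard_manager(section.mr);
 *     if (rdm) {
 *         ram_discard_manager_register_listener(rdm, &rdl, &section);
 *         // ... later, on teardown ...
 *         ram_discard_manager_unregister_listener(rdm, &rdl);
 *     }
 */
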
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address space
     * since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL.  Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     * The caller should guarantee that the synchronization with @last_stage
     * set to true is triggered only once, after all VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_start)(MemoryListener *listener);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener.  It can be used in contexts where we'd like to
     * identify one memory listener amongst the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

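/*
 * Illustrative sketch (not part of the original header): the returned
 * FlatView is only stable inside an RCU critical section (or while the
 * BQL is held), so a typical access pattern is:
 *
 *     RCU_READ_LOCK_GUARD();
 *     FlatView *fv = address_space_to_flatview(as);
 *     // ... inspect fv; do not cache it past the read-side critical section
 */
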
/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);

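/*
 * Illustrative sketch (not part of the original header): a callback that
 * simply counts the ranges in a FlatView; the names are hypothetical:
 *
 *     static bool count_ranges_cb(Int128 start, Int128 len,
 *                                 const MemoryRegion *mr,
 *                                 hwaddr offset_in_region, void *opaque)
 *     {
 *         unsigned *count = opaque;
 *
 *         (*count)++;
 *         return false;    // false: continue with the next range
 *     }
 *
 *     unsigned count = 0;
 *     flatview_for_each_range(address_space_to_flatview(as),
 *                             count_ranges_cb, &count);
 */
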
static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

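/*
 * Illustrative sketch (not part of the original header): keeping a section
 * alive beyond the lifetime of the caller-provided one:
 *
 *     MemoryRegionSection *copy = memory_region_section_new_copy(section);
 *     // ... use copy, possibly after the original section has gone away ...
 *     memory_region_section_free_copy(copy);
 */
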
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);

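/*
 * Illustrative sketch (not part of the original header): building a
 * container region and mapping a subregion into it;
 * memory_region_add_subregion() is declared elsewhere in this header, and
 * the names and sizes are hypothetical:
 *
 *     memory_region_init(&s->container, OBJECT(s), "mydev-container", 0x10000);
 *     memory_region_add_subregion(&s->container, 0x1000, &s->mmio);
 */
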
1191
1192/**
1193 * memory_region_ref: Add 1 to a memory region's reference count
1194 *
1195 * Whenever memory regions are accessed outside the BQL, they need to be
1196 * preserved against hot-unplug. MemoryRegions actually do not have their
1197 * own reference count; they piggyback on a QOM object, their "owner".
1198 * This function adds a reference to the owner.
1199 *
1200 * All MemoryRegions must have an owner if they can disappear, even if the
1201 * device they belong to operates exclusively under the BQL. This is because
1202 * the region could be returned at any time by memory_region_find, and this
1203 * is usually under guest control.
1204 *
1205 * @mr: the #MemoryRegion
1206 */
1207void memory_region_ref(MemoryRegion *mr);
1208
1209/**
1210 * memory_region_unref: Remove 1 to a memory region's reference count
1211 *
1212 * Whenever memory regions are accessed outside the BQL, they need to be
1213 * preserved against hot-unplug. MemoryRegions actually do not have their
1214 * own reference count; they piggyback on a QOM object, their "owner".
1215 * This function removes a reference to the owner and possibly destroys it.
1216 *
1217 * @mr: the #MemoryRegion
1218 */
1219void memory_region_unref(MemoryRegion *mr);
1220
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

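/*
 * Illustrative sketch (not part of the original header), wiring up the
 * hypothetical mydev_ops from the MemoryRegionOps example above; for a
 * sysbus device the region would then be exposed with sysbus_init_mmio():
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
 */
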
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE.
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @readonly: true to open @path for reading, false for read/write.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      bool readonly,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED.
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

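/*
 * Illustrative sketch (not part of the original header): backing a region
 * with a shared file mapping; the path, name and size are hypothetical:
 *
 *     memory_region_init_ram_from_file(mr, owner, "mydev-shm", 64 * MiB,
 *                                      0, RAM_SHARED, "/dev/shm/mydev",
 *                                      0, false, &error_fatal);
 */
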
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

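/*
 * Illustrative sketch (not part of the original header): exposing the first
 * megabyte of a RAM region at a second guest-physical address, as memory
 * controllers often do; system_ram_mr and the addresses are hypothetical:
 *
 *     memory_region_init_alias(&s->ram_lo, OBJECT(s), "ram-alias",
 *                              system_ram_mr, 0, 1 * MiB);
 *     memory_region_add_subregion(get_system_memory(), 0xa0000000, &s->ram_lo);
 */
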
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

30951157 1494/**
1221a474
AK
1495 * memory_region_init_iommu: Initialize a memory region of a custom type
1496 * that translates addresses
30951157
AK
1497 *
1498 * An IOMMU region translates addresses and forwards accesses to a target
1499 * memory region.
1500 *
2ce931d0
PM
1501 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1502 * @_iommu_mr should be a pointer to enough memory for an instance of
1503 * that subclass, @instance_size is the size of that subclass, and
1504 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1505 * instance of the subclass, and its methods will then be called to handle
1506 * accesses to the memory region. See the documentation of
1507 * #IOMMUMemoryRegionClass for further details.
1508 *
1221a474
AK
1509 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1510 * @instance_size: the IOMMUMemoryRegion subclass instance size
57914ecb 1511 * @mrtypename: the type name of the #IOMMUMemoryRegion
2c9b15ca 1512 * @owner: the object that tracks the region's reference count
30951157
AK
1513 * @name: used for debugging; not visible to the user or ABI
1514 * @size: size of the region.
1515 */
1221a474
AK
1516void memory_region_init_iommu(void *_iommu_mr,
1517 size_t instance_size,
1518 const char *mrtypename,
1519 Object *owner,
30951157
AK
1520 const char *name,
1521 uint64_t size);
1522
b08199c6
PM
1523/**
1524 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
1525 * region will modify memory directly.
1526 *
1527 * @mr: the #MemoryRegion to be initialized
1528 * @owner: the object that tracks the region's reference count (must be
1529 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1530 * @name: name of the memory region
1531 * @size: size of the region in bytes
1532 * @errp: pointer to Error*, to store an error if it happens.
1533 *
1534 * This function allocates RAM for a board model or device, and
1535 * arranges for it to be migrated (by calling vmstate_register_ram()
1536 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1537 * @owner is NULL).
1538 *
1539 * TODO: Currently we restrict @owner to being either NULL (for
1540 * global RAM regions with no owner) or devices, so that we can
1541 * give the RAM block a unique name for migration purposes.
1542 * We should lift this restriction and allow arbitrary Objects.
1543 * If you pass a non-NULL non-device @owner then we will assert.
1544 */
1545void memory_region_init_ram(MemoryRegion *mr,
d32335e8 1546 Object *owner,
b08199c6
PM
1547 const char *name,
1548 uint64_t size,
1549 Error **errp);
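
/*
 * Example (illustrative sketch; "MyDevice" and the realize function are
 * hypothetical): a device typically allocates its RAM at realize time,
 * passing itself as the owner so migration gets a uniquely named RAMBlock.
 *
 *   static void my_device_realize(DeviceState *dev, Error **errp)
 *   {
 *       MyDevice *s = MY_DEVICE(dev);
 *
 *       memory_region_init_ram(&s->buf, OBJECT(dev), "mydev.buf",
 *                              0x4000, errp);
 *   }
 */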

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
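
/*
 * Example (illustrative sketch; the "my_flash" names are hypothetical): a
 * flash device is read like ROM but traps writes so that program/erase
 * commands can be emulated.
 *
 *   static void my_flash_write(void *opaque, hwaddr addr,
 *                              uint64_t data, unsigned size)
 *   {
 *       // decode the program/erase command, update the backing RAM ...
 *   }
 *
 *   static const MemoryRegionOps my_flash_ops = {
 *       .write = my_flash_write,
 *       .endianness = DEVICE_LITTLE_ENDIAN,
 *   };
 *
 *   memory_region_init_rom_device(&s->mem, OBJECT(s), &my_flash_ops, s,
 *                                 "my-flash", 64 * KiB, &error_fatal);
 */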


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_protected: check whether a memory region is protected
 *
 * Returns %true if a memory region is protected RAM and cannot be accessed
 * via standard mechanisms, e.g. DMA.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_protected(MemoryRegion *mr);

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL.  This is a fast path that avoids QOM checking; use with
 * caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event);

/**
 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
 *   entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event);

/**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
 *                                           translation that covers the
 *                                           range of a notifier
 *
 * @notifier: the notifier to be notified
 */
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);


/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise.  In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region.  On error,
 * the error object must also be created.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);
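
/*
 * Example (illustrative sketch; "vdev" and the callback are hypothetical):
 * a vhost-like backend registers for MAP/UNMAP events over the whole
 * address range of IOMMU index 0.
 *
 *   static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *   {
 *       // update the backend's shadow mappings from *iotlb ...
 *   }
 *
 *   iommu_notifier_init(&vdev->n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                       0, HWADDR_MAX, 0);
 *   if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                             &vdev->n, &err)) {
 *       // handle the error, e.g. error_report_err(err);
 *   }
 */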

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * memory_region_iommu_get_min_page_size().
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise.  In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_iommu_set_page_size_mask: set the supported page
 * sizes for a given IOMMU memory region
 *
 * @iommu_mr: IOMMU memory region
 * @page_size_mask: supported page size mask
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_msync: Synchronize the selected address range of
 * a memory mapped region
 *
 * @mr: the memory region to be msync'd
 * @addr: the initial address of the range to be synced
 * @size: the size of the range to be synced
 */
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_writeback: Trigger cache writeback for
 * the selected address range
 *
 * @mr: the memory region to be updated
 * @addr: the initial address of the range to be written back
 * @size: the size of the range to be written back
 */
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
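
/*
 * Example (illustrative sketch; "s" and the scanline geometry are
 * hypothetical): a display device snapshots the dirty bitmap once per
 * refresh, then queries each scanline against the same snapshot.
 *
 *   DirtyBitmapSnapshot *snap;
 *   int y;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(&s->vram, 0, s->fb_size,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < s->height; y++) {
 *       if (memory_region_snapshot_get_dirty(&s->vram, snap,
 *                                            y * s->stride, s->stride)) {
 *           // redraw scanline y ...
 *       }
 *   }
 *   g_free(snap);
 */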

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 * client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
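
/*
 * Example (illustrative sketch; the doorbell layout is hypothetical): a
 * virtio-style device can have a guest write to a "notify" register kick
 * an EventNotifier directly, bypassing the MMIO dispatch path.
 *
 *   event_notifier_init(&s->kick, 0);
 *   memory_region_add_eventfd(&s->mmio,
 *                             NOTIFY_OFFSET,   // hypothetical register
 *                             2,               // 16-bit access
 *                             true,            // only when data matches
 *                             queue_index,
 *                             &s->kick);
 */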

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
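
/*
 * Example (illustrative sketch; addresses and names are hypothetical): a
 * chipset can overlay a small ROM window on top of RAM; the higher
 * priority makes the ROM win where the two overlap.
 *
 *   MemoryRegion *sysmem = get_system_memory();
 *
 *   memory_region_add_subregion(sysmem, 0x00000000, ram);
 *   memory_region_add_subregion_overlap(sysmem, 0x000f0000, bios_window, 1);
 */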

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the region to be queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to the container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into a #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into another memory region, which does not necessarily imply that it is
 * mapped into an address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
 * #MemoryRegion
 *
 * The #RamDiscardManager cannot change while a memory region is mapped.
 *
 * @mr: the #MemoryRegion
 */
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);

/**
 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
 * #RamDiscardManager assigned
 *
 * @mr: the #MemoryRegion
 */
static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
{
    return !!memory_region_get_ram_discard_manager(mr);
}

/**
 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
 * #MemoryRegion
 *
 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
 * that does not cover RAM, or a #MemoryRegion that already has a
 * #RamDiscardManager assigned.
 *
 * @mr: the #MemoryRegion
 * @rdm: #RamDiscardManager to set
 */
void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - @size = 0 iff no overlap was found
 * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - @offset_within_address_space >= @addr
 * - @offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
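
/*
 * Example (illustrative sketch): look up which region backs a guest
 * physical address.  memory_region_find() takes a reference on the
 * returned region, so drop it when done.
 *
 *   MemoryRegionSection section;
 *
 *   section = memory_region_find(get_system_memory(), gpa, 4);
 *   if (section.mr) {
 *       // section.offset_within_region is the offset of gpa in section.mr
 *       memory_region_unref(section.mr);
 *   }
 */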

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 *
 * @last_stage: whether this is the last stage of live migration
 */
void memory_global_dirty_log_sync(bool last_stage);

/**
 * memory_global_after_dirty_log_sync: synchronize the vCPUs after syncing
 * the dirty log for all memory
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read.  If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
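
/*
 * Example (illustrative sketch): batch several topology updates so that
 * listeners (e.g. KVM memory slots) see a single atomic change instead
 * of one per call.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(bank0, false);
 *   memory_region_set_address(bank1, new_base);
 *   memory_region_transaction_commit();
 */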

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 *
 * @flags: purpose of starting dirty log, migration or dirty rate
 */
void memory_global_dirty_log_start(unsigned int flags);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 *
 * @flags: purpose of stopping dirty log, migration or dirty rate
 */
void memory_global_dirty_log_stop(unsigned int flags);

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write,
                                MemTxAttrs attrs);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
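
/*
 * Example (illustrative sketch; the "mydev-dma" naming is hypothetical):
 * a bus master device often builds its own address space on top of an
 * IOMMU region or the system memory root.
 *
 *   address_space_init(&s->dma_as, MEMORY_REGION(&s->iommu), "mydev-dma");
 */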

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, void *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, hwaddr len);
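
/*
 * Example (illustrative sketch): a device DMAs a completion record into
 * guest memory and checks the transaction result.
 *
 *   MemTxResult res;
 *
 *   res = address_space_write(&s->dma_as, desc_addr,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             &completion, sizeof(completion));
 *   if (res != MEMTX_OK) {
 *       // raise a device-specific DMA error ...
 *   }
 */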

/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM.  This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const void *buf, hwaddr len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.h.inc"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.h.inc"
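
/*
 * Example (illustrative sketch; STATUS_DONE is a hypothetical flag): read
 * a 32-bit little-endian descriptor field and write back a status word,
 * discarding the per-access result.
 *
 *   uint32_t flags;
 *
 *   flags = address_space_ldl_le(as, desc + 4, MEMTXATTRS_UNSPECIFIED, NULL);
 *   address_space_stl_le(as, desc + 8, STATUS_DONE,
 *                        MEMTXATTRS_UNSPECIFIED, NULL);
 */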

struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })


/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.h.inc"

/* Inline fast path for direct RAM access.  */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.h.inc"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
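
/*
 * Example (illustrative sketch): a virtqueue-style device maps the
 * descriptor ring once, then performs cheap repeated reads relative to
 * the cached base.
 *
 *   MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *   uint16_t flags;
 *
 *   if (address_space_cache_init(&cache, as, ring_base, ring_len,
 *                                false) < 0) {
 *       return;   // the range was not plain RAM
 *   }
 *   address_space_read_cached(&cache, 0, &flags, sizeof(flags));
 *   address_space_cache_destroy(&cache);
 */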

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry.  Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range into that region.  Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL and set *@plen to zero, if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len);
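
/*
 * Example (illustrative sketch; "process" is a hypothetical consumer):
 * map a guest buffer for reading, falling back gracefully if only part
 * of the range could be mapped.
 *
 *   hwaddr plen = size;
 *   void *p;
 *
 *   p = address_space_map(as, gpa, &plen, false, MEMTXATTRS_UNSPECIFIED);
 *   if (!p || plen < size) {
 *       // resources exhausted or range split; use address_space_read()
 *   } else {
 *       process(p, plen);
 *   }
 *   if (p) {
 *       address_space_unmap(as, p, plen, false, plen);
 *   }
 */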
ac1970fb
AK
2856
2857
/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly &&
               !mr->rom_device && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within an RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

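/*
 * Illustrative sketch (not part of this header): reading a 32-bit value
 * from guest memory. The constant sizeof(val) lets the fast path above
 * be fully inlined; the address space and error handling are
 * placeholders.
 *
 *     uint32_t val;
 *     MemTxResult r = address_space_read(&address_space_memory, addr,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        &val, sizeof(val));
 *     if (r != MEMTX_OK) {
 *         // unassigned memory, device error or IOMMU fault
 *     }
 */
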
/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}

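/*
 * Illustrative sketch (not part of this header): repeated accesses to
 * one guest RAM area through a MemoryRegionCache, set up with
 * address_space_cache_init() (declared earlier in this file). The
 * layout, sizes and write-back via address_space_cache_invalidate()
 * are placeholders.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *
 *     if (address_space_cache_init(&cache, as, base, 16, true) >= 0) {
 *         address_space_read_cached(&cache, 0, &hdr, sizeof(hdr));
 *         address_space_write_cached(&cache, 8, &reply, sizeof(reply));
 *         address_space_cache_invalidate(&cache, 8, sizeof(reply));
 *         address_space_cache_destroy(&cache);
 *     }
 */
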
/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);

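/*
 * Illustrative sketch (not part of this header): zeroing a
 * guest-physical range, e.g. wiping a buffer during device reset; the
 * address space, bounds and error handling are placeholders.
 *
 *     if (address_space_set(&address_space_memory, base, 0, size,
 *                           MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         // part of the range was unassigned or faulted
 *     }
 */
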
#ifdef NEED_CPU_H
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif

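/*
 * Illustrative sketch (not part of this header): combining
 * devend_memop() with an access size when dispatching a device read;
 * the region, address and attributes are placeholders.
 *
 *     uint64_t data;
 *     MemOp op = size_memop(4) | devend_memop(DEVICE_LITTLE_ENDIAN);
 *     memory_region_dispatch_read(mr, addr, &data, op,
 *                                 MEMTXATTRS_UNSPECIFIED);
 */
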
/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating any
 *   previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);

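/*
 * Illustrative sketch (not part of this header): a device that pins all
 * guest memory disabling discards for its lifetime; the error message
 * and surrounding code are placeholders.
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_setg(errp, "RAM discarding is in use (e.g. virtio-mem)");
 *         return;
 *     }
 *     // ... pin memory and operate ...
 *     ram_block_discard_disable(false);  // re-enable on teardown
 */
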
/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discards have already been
 * disabled.
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */