]> git.proxmox.com Git - mirror_qemu.git/blame - include/exec/memory.h
Replace TARGET_WORDS_BIGENDIAN
[mirror_qemu.git] / include / exec / memory.h
CommitLineData
093bc2cd
AK
1/*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14#ifndef MEMORY_H
15#define MEMORY_H
16
17#ifndef CONFIG_USER_ONLY
18
022c62cb
PB
19#include "exec/cpu-common.h"
20#include "exec/hwaddr.h"
cc05c43a 21#include "exec/memattrs.h"
e67c9046 22#include "exec/memop.h"
0987d735 23#include "exec/ramlist.h"
1b53ecd9 24#include "qemu/bswap.h"
1de7afc9 25#include "qemu/queue.h"
1de7afc9 26#include "qemu/int128.h"
06866575 27#include "qemu/notify.h"
b4fefef9 28#include "qom/object.h"
374f2981 29#include "qemu/rcu.h"
093bc2cd 30
07bdaa41
PB
31#define RAM_ADDR_INVALID (~(ram_addr_t)0)
32
052e87b0
PB
33#define MAX_PHYS_ADDR_SPACE_BITS 62
34#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
35
bb3c92ed 36#define TYPE_MEMORY_REGION "memory-region"
8110fa1d
EH
37DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
38 TYPE_MEMORY_REGION)
b4fefef9 39
bb3c92ed 40#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
db1015e9 41typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
8110fa1d
EH
42DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
43 IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
3df9d748 44
8947d7fc
DH
45#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager"
46typedef struct RamDiscardManagerClass RamDiscardManagerClass;
47typedef struct RamDiscardManager RamDiscardManager;
48DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
49 RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
50
20f5a302
AB
51#ifdef CONFIG_FUZZ
52void fuzz_dma_read_cb(size_t addr,
53 size_t len,
fc1c8344 54 MemoryRegion *mr);
e7d3222e
AB
55#else
56static inline void fuzz_dma_read_cb(size_t addr,
57 size_t len,
fc1c8344 58 MemoryRegion *mr)
e7d3222e
AB
59{
60 /* Do Nothing */
61}
20f5a302
AB
62#endif
63
63b41db4
HH
64/* Possible bits for global_dirty_log_{start|stop} */
65
66/* Dirty tracking enabled because migration is running */
67#define GLOBAL_DIRTY_MIGRATION (1U << 0)
68
69/* Dirty tracking enabled because measuring dirty rate */
70#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
71
72#define GLOBAL_DIRTY_MASK (0x3)
73
74extern unsigned int global_dirty_tracking;
ae7a2bca 75
093bc2cd 76typedef struct MemoryRegionOps MemoryRegionOps;
74901c3b 77
f7806925
EA
78struct ReservedRegion {
79 hwaddr low;
80 hwaddr high;
81 unsigned type;
82};
83
8947d7fc
DH
84/**
85 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
86 *
87 * @mr: the region, or %NULL if empty
88 * @fv: the flat view of the address space the region is mapped in
89 * @offset_within_region: the beginning of the section, relative to @mr's start
90 * @size: the size of the section; will not exceed @mr's boundaries
91 * @offset_within_address_space: the address of the first byte of the section
92 * relative to the region's address space
93 * @readonly: writes to this section are ignored
94 * @nonvolatile: this section is non-volatile
95 */
96struct MemoryRegionSection {
97 Int128 size;
98 MemoryRegion *mr;
99 FlatView *fv;
100 hwaddr offset_within_region;
101 hwaddr offset_within_address_space;
102 bool readonly;
103 bool nonvolatile;
104};
105
30951157
AK
106typedef struct IOMMUTLBEntry IOMMUTLBEntry;
107
108/* See address_space_translate: bit 0 is read, bit 1 is write. */
109typedef enum {
110 IOMMU_NONE = 0,
111 IOMMU_RO = 1,
112 IOMMU_WO = 2,
113 IOMMU_RW = 3,
114} IOMMUAccessFlags;
115
f06a696d
PX
116#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
117
30951157
AK
118struct IOMMUTLBEntry {
119 AddressSpace *target_as;
120 hwaddr iova;
121 hwaddr translated_addr;
122 hwaddr addr_mask; /* 0xfff = 4k translation */
123 IOMMUAccessFlags perm;
124};
125
cdb30812
PX
126/*
127 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
128 * register with one or multiple IOMMU Notifier capability bit(s).
129 */
130typedef enum {
131 IOMMU_NOTIFIER_NONE = 0,
132 /* Notify cache invalidations */
133 IOMMU_NOTIFIER_UNMAP = 0x1,
134 /* Notify entry changes (newly created entries) */
135 IOMMU_NOTIFIER_MAP = 0x2,
b68ba1ca
EP
136 /* Notify changes on device IOTLB entries */
137 IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
cdb30812
PX
138} IOMMUNotifierFlag;
139
b68ba1ca
EP
140#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
141#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
142#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
143 IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
cdb30812 144
698feb5e
PX
145struct IOMMUNotifier;
146typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
147 IOMMUTLBEntry *data);
148
cdb30812 149struct IOMMUNotifier {
698feb5e 150 IOMMUNotify notify;
cdb30812 151 IOMMUNotifierFlag notifier_flags;
698feb5e
PX
152 /* Notify for address space range start <= addr <= end */
153 hwaddr start;
154 hwaddr end;
cb1efcf4 155 int iommu_idx;
cdb30812
PX
156 QLIST_ENTRY(IOMMUNotifier) node;
157};
158typedef struct IOMMUNotifier IOMMUNotifier;
159
5039caf3
EP
160typedef struct IOMMUTLBEvent {
161 IOMMUNotifierFlag type;
162 IOMMUTLBEntry entry;
163} IOMMUTLBEvent;
164
b0e5de93
JH
165/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
166#define RAM_PREALLOC (1 << 0)
167
168/* RAM is mmap-ed with MAP_SHARED */
169#define RAM_SHARED (1 << 1)
170
171/* Only a portion of RAM (used_length) is actually used, and migrated.
c7c0e724 172 * Resizing RAM while migrating can result in the migration being canceled.
b0e5de93
JH
173 */
174#define RAM_RESIZEABLE (1 << 2)
175
176/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
177 * zero the page and wake waiting processes.
178 * (Set during postcopy)
179 */
180#define RAM_UF_ZEROPAGE (1 << 3)
181
182/* RAM can be migrated */
183#define RAM_MIGRATABLE (1 << 4)
184
a4de8552
JH
185/* RAM is a persistent kind memory */
186#define RAM_PMEM (1 << 5)
187
0e9b5cd6 188
278e2f55
AG
189/*
190 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
191 * support 'write-tracking' migration type.
192 * Implies ram_state->ram_wt_enabled.
193 */
194#define RAM_UF_WRITEPROTECT (1 << 6)
195
8dbe22c6
DH
196/*
197 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
198 * pages if applicable) is skipped: will bail out if not supported. When not
199 * set, the OS will do the reservation, if supported for the memory type.
200 */
201#define RAM_NORESERVE (1 << 7)
202
56918a12
SC
203/* RAM that isn't accessible through normal means. */
204#define RAM_PROTECTED (1 << 8)
205
698feb5e
PX
206static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
207 IOMMUNotifierFlag flags,
cb1efcf4
PM
208 hwaddr start, hwaddr end,
209 int iommu_idx)
698feb5e
PX
210{
211 n->notify = fn;
212 n->notifier_flags = flags;
213 n->start = start;
214 n->end = end;
cb1efcf4 215 n->iommu_idx = iommu_idx;
698feb5e
PX
216}
217
093bc2cd
AK
218/*
219 * Memory region callbacks
220 */
221struct MemoryRegionOps {
222 /* Read from the memory region. @addr is relative to @mr; @size is
223 * in bytes. */
224 uint64_t (*read)(void *opaque,
a8170e5e 225 hwaddr addr,
093bc2cd
AK
226 unsigned size);
227 /* Write to the memory region. @addr is relative to @mr; @size is
228 * in bytes. */
229 void (*write)(void *opaque,
a8170e5e 230 hwaddr addr,
093bc2cd
AK
231 uint64_t data,
232 unsigned size);
233
cc05c43a
PM
234 MemTxResult (*read_with_attrs)(void *opaque,
235 hwaddr addr,
236 uint64_t *data,
237 unsigned size,
238 MemTxAttrs attrs);
239 MemTxResult (*write_with_attrs)(void *opaque,
240 hwaddr addr,
241 uint64_t data,
242 unsigned size,
243 MemTxAttrs attrs);
244
093bc2cd
AK
245 enum device_endian endianness;
246 /* Guest-visible constraints: */
247 struct {
248 /* If nonzero, specify bounds on access sizes beyond which a machine
249 * check is thrown.
250 */
251 unsigned min_access_size;
252 unsigned max_access_size;
253 /* If true, unaligned accesses are supported. Otherwise unaligned
254 * accesses throw machine checks.
255 */
256 bool unaligned;
897fa7cf
AK
257 /*
258 * If present, and returns #false, the transaction is not accepted
259 * by the device (and results in machine dependent behaviour such
260 * as a machine check exception).
261 */
a8170e5e 262 bool (*accepts)(void *opaque, hwaddr addr,
8372d383
PM
263 unsigned size, bool is_write,
264 MemTxAttrs attrs);
093bc2cd
AK
265 } valid;
266 /* Internal implementation constraints: */
267 struct {
268 /* If nonzero, specifies the minimum size implemented. Smaller sizes
269 * will be rounded upwards and a partial result will be returned.
270 */
271 unsigned min_access_size;
272 /* If nonzero, specifies the maximum size implemented. Larger sizes
273 * will be done as a series of accesses with smaller sizes.
274 */
275 unsigned max_access_size;
276 /* If true, unaligned accesses are supported. Otherwise all accesses
277 * are converted to (possibly multiple) naturally aligned accesses.
278 */
edc1ba7a 279 bool unaligned;
093bc2cd
AK
280 } impl;
281};
282
1b53ecd9
MA
283typedef struct MemoryRegionClass {
284 /* private */
285 ObjectClass parent_class;
286} MemoryRegionClass;
287
288
f1334de6
AK
289enum IOMMUMemoryRegionAttr {
290 IOMMU_ATTR_SPAPR_TCE_FD
291};
292
acbef3cc 293/*
2ce931d0
PM
294 * IOMMUMemoryRegionClass:
295 *
296 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
297 * and provide an implementation of at least the @translate method here
298 * to handle requests to the memory region. Other methods are optional.
299 *
300 * The IOMMU implementation must use the IOMMU notifier infrastructure
301 * to report whenever mappings are changed, by calling
302 * memory_region_notify_iommu() (or, if necessary, by calling
3b5ebf85 303 * memory_region_notify_iommu_one() for each registered notifier).
21f40209
PM
304 *
305 * Conceptually an IOMMU provides a mapping from input address
306 * to an output TLB entry. If the IOMMU is aware of memory transaction
307 * attributes and the output TLB entry depends on the transaction
308 * attributes, we represent this using IOMMU indexes. Each index
309 * selects a particular translation table that the IOMMU has:
ffb716f0 310 *
21f40209 311 * @attrs_to_index returns the IOMMU index for a set of transaction attributes
ffb716f0 312 *
21f40209 313 * @translate takes an input address and an IOMMU index
ffb716f0 314 *
21f40209
PM
315 * and the mapping returned can only depend on the input address and the
316 * IOMMU index.
317 *
318 * Most IOMMUs don't care about the transaction attributes and support
319 * only a single IOMMU index. A more complex IOMMU might have one index
320 * for secure transactions and one for non-secure transactions.
2ce931d0 321 */
db1015e9 322struct IOMMUMemoryRegionClass {
ffb716f0 323 /* private: */
1b53ecd9 324 MemoryRegionClass parent_class;
30951157 325
ffb716f0
EH
326 /* public: */
327 /**
328 * @translate:
329 *
2ce931d0
PM
330 * Return a TLB entry that contains a given address.
331 *
332 * The IOMMUAccessFlags indicated via @flag are optional and may
333 * be specified as IOMMU_NONE to indicate that the caller needs
334 * the full translation information for both reads and writes. If
335 * the access flags are specified then the IOMMU implementation
336 * may use this as an optimization, to stop doing a page table
337 * walk as soon as it knows that the requested permissions are not
338 * allowed. If IOMMU_NONE is passed then the IOMMU must do the
339 * full page table walk and report the permissions in the returned
340 * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
341 * return different mappings for reads and writes.)
342 *
343 * The returned information remains valid while the caller is
344 * holding the big QEMU lock or is inside an RCU critical section;
345 * if the caller wishes to cache the mapping beyond that it must
346 * register an IOMMU notifier so it can invalidate its cached
347 * information when the IOMMU mapping changes.
348 *
349 * @iommu: the IOMMUMemoryRegion
ffb716f0 350 *
2ce931d0 351 * @hwaddr: address to be translated within the memory region
ffb716f0
EH
352 *
353 * @flag: requested access permission
354 *
2c91bcf2 355 * @iommu_idx: IOMMU index for the translation
bf55b7af 356 */
3df9d748 357 IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
2c91bcf2 358 IOMMUAccessFlags flag, int iommu_idx);
ffb716f0
EH
359 /**
360 * @get_min_page_size:
361 *
362 * Returns minimum supported page size in bytes.
363 *
2ce931d0
PM
364 * If this method is not provided then the minimum is assumed to
365 * be TARGET_PAGE_SIZE.
366 *
367 * @iommu: the IOMMUMemoryRegion
368 */
3df9d748 369 uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
ffb716f0
EH
370 /**
371 * @notify_flag_changed:
372 *
373 * Called when IOMMU Notifier flag changes (ie when the set of
2ce931d0
PM
374 * events which IOMMU users are requesting notification for changes).
375 * Optional method -- need not be provided if the IOMMU does not
376 * need to know exactly which events must be notified.
377 *
378 * @iommu: the IOMMUMemoryRegion
ffb716f0 379 *
2ce931d0 380 * @old_flags: events which previously needed to be notified
ffb716f0 381 *
2ce931d0 382 * @new_flags: events which now need to be notified
549d4005
EA
383 *
384 * Returns 0 on success, or a negative errno; in particular
385 * returns -EINVAL if the new flag bitmap is not supported by the
386 * IOMMU memory region. In case of failure, the error object
387 * must be created
2ce931d0 388 */
549d4005
EA
389 int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
390 IOMMUNotifierFlag old_flags,
391 IOMMUNotifierFlag new_flags,
392 Error **errp);
ffb716f0
EH
393 /**
394 * @replay:
395 *
396 * Called to handle memory_region_iommu_replay().
2ce931d0
PM
397 *
398 * The default implementation of memory_region_iommu_replay() is to
399 * call the IOMMU translate method for every page in the address space
400 * with flag == IOMMU_NONE and then call the notifier if translate
401 * returns a valid mapping. If this method is implemented then it
402 * overrides the default behaviour, and must provide the full semantics
403 * of memory_region_iommu_replay(), by calling @notifier for every
404 * translation present in the IOMMU.
405 *
406 * Optional method -- an IOMMU only needs to provide this method
407 * if the default is inefficient or produces undesirable side effects.
408 *
409 * Note: this is not related to record-and-replay functionality.
410 */
3df9d748 411 void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
f1334de6 412
ffb716f0
EH
413 /**
414 * @get_attr:
415 *
416 * Get IOMMU misc attributes. This is an optional method that
2ce931d0
PM
417 * can be used to allow users of the IOMMU to get implementation-specific
418 * information. The IOMMU implements this method to handle calls
419 * by IOMMU users to memory_region_iommu_get_attr() by filling in
420 * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
421 * the IOMMU supports. If the method is unimplemented then
422 * memory_region_iommu_get_attr() will always return -EINVAL.
423 *
424 * @iommu: the IOMMUMemoryRegion
ffb716f0 425 *
2ce931d0 426 * @attr: attribute being queried
ffb716f0 427 *
2ce931d0
PM
428 * @data: memory to fill in with the attribute data
429 *
430 * Returns 0 on success, or a negative errno; in particular
431 * returns -EINVAL for unrecognized or unimplemented attribute types.
432 */
433 int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
f1334de6 434 void *data);
21f40209 435
ffb716f0
EH
436 /**
437 * @attrs_to_index:
438 *
439 * Return the IOMMU index to use for a given set of transaction attributes.
21f40209
PM
440 *
441 * Optional method: if an IOMMU only supports a single IOMMU index then
442 * the default implementation of memory_region_iommu_attrs_to_index()
443 * will return 0.
444 *
445 * The indexes supported by an IOMMU must be contiguous, starting at 0.
446 *
447 * @iommu: the IOMMUMemoryRegion
448 * @attrs: memory transaction attributes
449 */
450 int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
451
ffb716f0
EH
452 /**
453 * @num_indexes:
454 *
455 * Return the number of IOMMU indexes this IOMMU supports.
21f40209
PM
456 *
457 * Optional method: if this method is not provided, then
458 * memory_region_iommu_num_indexes() will return 1, indicating that
459 * only a single IOMMU index is supported.
460 *
461 * @iommu: the IOMMUMemoryRegion
462 */
463 int (*num_indexes)(IOMMUMemoryRegion *iommu);
457f8cbb
BB
464
465 /**
466 * @iommu_set_page_size_mask:
467 *
468 * Restrict the page size mask that can be supported with a given IOMMU
469 * memory region. Used for example to propagate host physical IOMMU page
470 * size mask limitations to the virtual IOMMU.
471 *
472 * Optional method: if this method is not provided, then the default global
473 * page mask is used.
474 *
475 * @iommu: the IOMMUMemoryRegion
476 *
477 * @page_size_mask: a bitmask of supported page sizes. At least one bit,
478 * representing the smallest page size, must be set. Additional set bits
479 * represent supported block sizes. For example a host physical IOMMU that
480 * uses page tables with a page size of 4kB, and supports 2MB and 4GB
481 * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
482 * block sizes is specified with mask 0xfffffffffffff000.
483 *
484 * Returns 0 on success, or a negative error. In case of failure, the error
485 * object must be created.
486 */
487 int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
488 uint64_t page_size_mask,
489 Error **errp);
db1015e9 490};
30951157 491
8947d7fc
DH
492typedef struct RamDiscardListener RamDiscardListener;
493typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
494 MemoryRegionSection *section);
495typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
496 MemoryRegionSection *section);
497
498struct RamDiscardListener {
499 /*
500 * @notify_populate:
501 *
502 * Notification that previously discarded memory is about to get populated.
503 * Listeners are able to object. If any listener objects, already
504 * successfully notified listeners are notified about a discard again.
505 *
506 * @rdl: the #RamDiscardListener getting notified
507 * @section: the #MemoryRegionSection to get populated. The section
508 * is aligned within the memory region to the minimum granularity
509 * unless it would exceed the registered section.
510 *
511 * Returns 0 on success. If the notification is rejected by the listener,
512 * an error is returned.
513 */
514 NotifyRamPopulate notify_populate;
515
516 /*
517 * @notify_discard:
518 *
519 * Notification that previously populated memory was discarded successfully
520 * and listeners should drop all references to such memory and prevent
521 * new population (e.g., unmap).
522 *
523 * @rdl: the #RamDiscardListener getting notified
524 * @section: the #MemoryRegionSection to get populated. The section
525 * is aligned within the memory region to the minimum granularity
526 * unless it would exceed the registered section.
527 */
528 NotifyRamDiscard notify_discard;
529
530 /*
531 * @double_discard_supported:
532 *
533 * The listener suppors getting @notify_discard notifications that span
534 * already discarded parts.
535 */
536 bool double_discard_supported;
537
538 MemoryRegionSection *section;
539 QLIST_ENTRY(RamDiscardListener) next;
540};
541
542static inline void ram_discard_listener_init(RamDiscardListener *rdl,
543 NotifyRamPopulate populate_fn,
544 NotifyRamDiscard discard_fn,
545 bool double_discard_supported)
546{
547 rdl->notify_populate = populate_fn;
548 rdl->notify_discard = discard_fn;
549 rdl->double_discard_supported = double_discard_supported;
550}
551
552typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
adaf9d92 553typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
8947d7fc
DH
554
555/*
556 * RamDiscardManagerClass:
557 *
558 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
559 * regions are currently populated to be used/accessed by the VM, notifying
560 * after parts were discarded (freeing up memory) and before parts will be
561 * populated (consuming memory), to be used/acessed by the VM.
562 *
563 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
564 * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
565 * mapped.
566 *
567 * The #RamDiscardManager is intended to be used by technologies that are
568 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
569 * memory inside a #MemoryRegion), and require proper coordination to only
570 * map the currently populated parts, to hinder parts that are expected to
571 * remain discarded from silently getting populated and consuming memory.
572 * Technologies that support discarding of RAM don't have to bother and can
573 * simply map the whole #MemoryRegion.
574 *
575 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
576 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
577 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
578 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
579 * properly coordinate with listeners before memory is plugged (populated),
580 * and after memory is unplugged (discarded).
581 *
582 * Listeners are called in multiples of the minimum granularity (unless it
583 * would exceed the registered range) and changes are aligned to the minimum
584 * granularity within the #MemoryRegion. Listeners have to prepare for memory
585 * becomming discarded in a different granularity than it was populated and the
586 * other way around.
587 */
588struct RamDiscardManagerClass {
589 /* private */
590 InterfaceClass parent_class;
591
592 /* public */
593
594 /**
595 * @get_min_granularity:
596 *
597 * Get the minimum granularity in which listeners will get notified
598 * about changes within the #MemoryRegion via the #RamDiscardManager.
599 *
600 * @rdm: the #RamDiscardManager
601 * @mr: the #MemoryRegion
602 *
603 * Returns the minimum granularity.
604 */
605 uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
606 const MemoryRegion *mr);
607
608 /**
609 * @is_populated:
610 *
611 * Check whether the given #MemoryRegionSection is completely populated
612 * (i.e., no parts are currently discarded) via the #RamDiscardManager.
613 * There are no alignment requirements.
614 *
615 * @rdm: the #RamDiscardManager
616 * @section: the #MemoryRegionSection
617 *
618 * Returns whether the given range is completely populated.
619 */
620 bool (*is_populated)(const RamDiscardManager *rdm,
621 const MemoryRegionSection *section);
622
623 /**
624 * @replay_populated:
625 *
626 * Call the #ReplayRamPopulate callback for all populated parts within the
627 * #MemoryRegionSection via the #RamDiscardManager.
628 *
629 * In case any call fails, no further calls are made.
630 *
631 * @rdm: the #RamDiscardManager
632 * @section: the #MemoryRegionSection
633 * @replay_fn: the #ReplayRamPopulate callback
634 * @opaque: pointer to forward to the callback
635 *
636 * Returns 0 on success, or a negative error if any notification failed.
637 */
638 int (*replay_populated)(const RamDiscardManager *rdm,
639 MemoryRegionSection *section,
640 ReplayRamPopulate replay_fn, void *opaque);
641
adaf9d92
DH
642 /**
643 * @replay_discarded:
644 *
645 * Call the #ReplayRamDiscard callback for all discarded parts within the
646 * #MemoryRegionSection via the #RamDiscardManager.
647 *
648 * @rdm: the #RamDiscardManager
649 * @section: the #MemoryRegionSection
650 * @replay_fn: the #ReplayRamDiscard callback
651 * @opaque: pointer to forward to the callback
652 */
653 void (*replay_discarded)(const RamDiscardManager *rdm,
654 MemoryRegionSection *section,
655 ReplayRamDiscard replay_fn, void *opaque);
656
8947d7fc
DH
657 /**
658 * @register_listener:
659 *
660 * Register a #RamDiscardListener for the given #MemoryRegionSection and
661 * immediately notify the #RamDiscardListener about all populated parts
662 * within the #MemoryRegionSection via the #RamDiscardManager.
663 *
664 * In case any notification fails, no further notifications are triggered
665 * and an error is logged.
666 *
667 * @rdm: the #RamDiscardManager
668 * @rdl: the #RamDiscardListener
669 * @section: the #MemoryRegionSection
670 */
671 void (*register_listener)(RamDiscardManager *rdm,
672 RamDiscardListener *rdl,
673 MemoryRegionSection *section);
674
675 /**
676 * @unregister_listener:
677 *
678 * Unregister a previously registered #RamDiscardListener via the
679 * #RamDiscardManager after notifying the #RamDiscardListener about all
680 * populated parts becoming unpopulated within the registered
681 * #MemoryRegionSection.
682 *
683 * @rdm: the #RamDiscardManager
684 * @rdl: the #RamDiscardListener
685 */
686 void (*unregister_listener)(RamDiscardManager *rdm,
687 RamDiscardListener *rdl);
688};
689
690uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
691 const MemoryRegion *mr);
692
693bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
694 const MemoryRegionSection *section);
695
696int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
697 MemoryRegionSection *section,
698 ReplayRamPopulate replay_fn,
699 void *opaque);
700
adaf9d92
DH
701void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
702 MemoryRegionSection *section,
703 ReplayRamDiscard replay_fn,
704 void *opaque);
705
8947d7fc
DH
706void ram_discard_manager_register_listener(RamDiscardManager *rdm,
707 RamDiscardListener *rdl,
708 MemoryRegionSection *section);
709
710void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
711 RamDiscardListener *rdl);
712
093bc2cd 713typedef struct CoalescedMemoryRange CoalescedMemoryRange;
3e9d69e7 714typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
093bc2cd 715
08226b44
PB
716/** MemoryRegion:
717 *
718 * A struct representing a memory region.
719 */
093bc2cd 720struct MemoryRegion {
b4fefef9 721 Object parent_obj;
a676854f 722
08226b44 723 /* private: */
a676854f
PB
724
725 /* The following fields should fit in a cache line */
726 bool romd_mode;
727 bool ram;
728 bool subpage;
729 bool readonly; /* For RAM regions */
c26763f8 730 bool nonvolatile;
a676854f
PB
731 bool rom_device;
732 bool flush_coalesced_mmio;
a676854f 733 uint8_t dirty_log_mask;
3df9d748 734 bool is_iommu;
58eaa217 735 RAMBlock *ram_block;
612263cf 736 Object *owner;
a676854f
PB
737
738 const MemoryRegionOps *ops;
093bc2cd 739 void *opaque;
feca4ac1 740 MemoryRegion *container;
5ead6218 741 int mapped_via_alias; /* Mapped via an alias, container might be NULL */
08dafab4 742 Int128 size;
a8170e5e 743 hwaddr addr;
545e92e0 744 void (*destructor)(MemoryRegion *mr);
a2b257d6 745 uint64_t align;
14a3c10a 746 bool terminates;
21e00fa5 747 bool ram_device;
6bba19ba 748 bool enabled;
1660e72d 749 bool warning_printed; /* For reservations */
deb809ed 750 uint8_t vga_logging_count;
093bc2cd 751 MemoryRegion *alias;
a8170e5e 752 hwaddr alias_offset;
d33382da 753 int32_t priority;
b58deb34 754 QTAILQ_HEAD(, MemoryRegion) subregions;
093bc2cd 755 QTAILQ_ENTRY(MemoryRegion) subregions_link;
b58deb34 756 QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
302fa283 757 const char *name;
3e9d69e7
AK
758 unsigned ioeventfd_nb;
759 MemoryRegionIoeventfd *ioeventfds;
8947d7fc 760 RamDiscardManager *rdm; /* Only for RAM */
3df9d748
AK
761};
762
763struct IOMMUMemoryRegion {
764 MemoryRegion parent_obj;
765
cdb30812 766 QLIST_HEAD(, IOMMUNotifier) iommu_notify;
5bf3d319 767 IOMMUNotifierFlag iommu_notify_flags;
093bc2cd
AK
768};
769
512fa408
PX
770#define IOMMU_NOTIFIER_FOREACH(n, mr) \
771 QLIST_FOREACH((n), &(mr)->iommu_notify, node)
772
c2fc83e8 773/**
301302f0 774 * struct MemoryListener: callbacks structure for updates to the physical memory map
c2fc83e8
PB
775 *
776 * Allows a component to adjust to changes in the guest-visible memory map.
777 * Use with memory_listener_register() and memory_listener_unregister().
778 */
779struct MemoryListener {
5d248213
PB
780 /**
781 * @begin:
782 *
783 * Called at the beginning of an address space update transaction.
784 * Followed by calls to #MemoryListener.region_add(),
785 * #MemoryListener.region_del(), #MemoryListener.region_nop(),
786 * #MemoryListener.log_start() and #MemoryListener.log_stop() in
787 * increasing address order.
788 *
789 * @listener: The #MemoryListener.
790 */
c2fc83e8 791 void (*begin)(MemoryListener *listener);
5d248213
PB
792
793 /**
794 * @commit:
795 *
796 * Called at the end of an address space update transaction,
797 * after the last call to #MemoryListener.region_add(),
798 * #MemoryListener.region_del() or #MemoryListener.region_nop(),
799 * #MemoryListener.log_start() and #MemoryListener.log_stop().
800 *
801 * @listener: The #MemoryListener.
802 */
c2fc83e8 803 void (*commit)(MemoryListener *listener);
5d248213
PB
804
805 /**
806 * @region_add:
807 *
808 * Called during an address space update transaction,
809 * for a section of the address space that is new in this address space
810 * space since the last transaction.
811 *
812 * @listener: The #MemoryListener.
813 * @section: The new #MemoryRegionSection.
814 */
c2fc83e8 815 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
5d248213
PB
816
817 /**
818 * @region_del:
819 *
820 * Called during an address space update transaction,
821 * for a section of the address space that has disappeared in the address
822 * space since the last transaction.
823 *
824 * @listener: The #MemoryListener.
825 * @section: The old #MemoryRegionSection.
826 */
c2fc83e8 827 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
5d248213
PB
828
829 /**
830 * @region_nop:
831 *
832 * Called during an address space update transaction,
833 * for a section of the address space that is in the same place in the address
834 * space as in the last transaction.
835 *
836 * @listener: The #MemoryListener.
837 * @section: The #MemoryRegionSection.
838 */
c2fc83e8 839 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
5d248213
PB
840
841 /**
842 * @log_start:
843 *
844 * Called during an address space update transaction, after
d7878875 845 * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
5d248213
PB
846 * #MemoryListener.region_nop(), if dirty memory logging clients have
847 * become active since the last transaction.
848 *
849 * @listener: The #MemoryListener.
850 * @section: The #MemoryRegionSection.
851 * @old: A bitmap of dirty memory logging clients that were active in
852 * the previous transaction.
853 * @new: A bitmap of dirty memory logging clients that are active in
854 * the current transaction.
855 */
b2dfd71c
PB
856 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
857 int old, int new);
5d248213
PB
858
859 /**
860 * @log_stop:
861 *
862 * Called during an address space update transaction, after
863 * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
864 * #MemoryListener.region_nop() and possibly after
865 * #MemoryListener.log_start(), if dirty memory logging clients have
866 * become inactive since the last transaction.
867 *
868 * @listener: The #MemoryListener.
869 * @section: The #MemoryRegionSection.
870 * @old: A bitmap of dirty memory logging clients that were active in
871 * the previous transaction.
872 * @new: A bitmap of dirty memory logging clients that are active in
873 * the current transaction.
874 */
b2dfd71c
PB
875 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
876 int old, int new);
5d248213
PB
877
878 /**
879 * @log_sync:
880 *
881 * Called by memory_region_snapshot_and_clear_dirty() and
882 * memory_global_dirty_log_sync(), before accessing QEMU's "official"
883 * copy of the dirty memory bitmap for a #MemoryRegionSection.
884 *
885 * @listener: The #MemoryListener.
886 * @section: The #MemoryRegionSection.
887 */
c2fc83e8 888 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
5d248213 889
b87eaa9b
PX
890 /**
891 * @log_sync_global:
892 *
893 * This is the global version of @log_sync when the listener does
894 * not have a way to synchronize the log with finer granularity.
895 * When the listener registers with @log_sync_global defined, then
896 * its @log_sync must be NULL. Vice versa.
897 *
898 * @listener: The #MemoryListener.
899 */
900 void (*log_sync_global)(MemoryListener *listener);
901
5d248213
PB
902 /**
903 * @log_clear:
904 *
905 * Called before reading the dirty memory bitmap for a
906 * #MemoryRegionSection.
907 *
908 * @listener: The #MemoryListener.
909 * @section: The #MemoryRegionSection.
910 */
077874e0 911 void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
5d248213
PB
912
913 /**
914 * @log_global_start:
915 *
916 * Called by memory_global_dirty_log_start(), which
917 * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
918 * the address space. #MemoryListener.log_global_start() is also
919 * called when a #MemoryListener is added, if global dirty logging is
920 * active at that time.
921 *
922 * @listener: The #MemoryListener.
923 */
c2fc83e8 924 void (*log_global_start)(MemoryListener *listener);
5d248213
PB
925
926 /**
927 * @log_global_stop:
928 *
929 * Called by memory_global_dirty_log_stop(), which
930 * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
931 * the address space.
932 *
933 * @listener: The #MemoryListener.
934 */
c2fc83e8 935 void (*log_global_stop)(MemoryListener *listener);
5d248213
PB
936
937 /**
938 * @log_global_after_sync:
939 *
940 * Called after reading the dirty memory bitmap
941 * for any #MemoryRegionSection.
942 *
943 * @listener: The #MemoryListener.
944 */
9458a9a1 945 void (*log_global_after_sync)(MemoryListener *listener);
5d248213
PB
946
947 /**
948 * @eventfd_add:
949 *
950 * Called during an address space update transaction,
951 * for a section of the address space that has had a new ioeventfd
952 * registration since the last transaction.
953 *
954 * @listener: The #MemoryListener.
955 * @section: The new #MemoryRegionSection.
956 * @match_data: The @match_data parameter for the new ioeventfd.
957 * @data: The @data parameter for the new ioeventfd.
958 * @e: The #EventNotifier parameter for the new ioeventfd.
959 */
c2fc83e8
PB
960 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
961 bool match_data, uint64_t data, EventNotifier *e);
5d248213
PB
962
963 /**
964 * @eventfd_del:
965 *
966 * Called during an address space update transaction,
967 * for a section of the address space that has dropped an ioeventfd
968 * registration since the last transaction.
969 *
970 * @listener: The #MemoryListener.
971 * @section: The new #MemoryRegionSection.
972 * @match_data: The @match_data parameter for the dropped ioeventfd.
973 * @data: The @data parameter for the dropped ioeventfd.
974 * @e: The #EventNotifier parameter for the dropped ioeventfd.
975 */
c2fc83e8
PB
976 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
977 bool match_data, uint64_t data, EventNotifier *e);
5d248213
PB
978
979 /**
980 * @coalesced_io_add:
981 *
982 * Called during an address space update transaction,
983 * for a section of the address space that has had a new coalesced
984 * MMIO range registration since the last transaction.
985 *
986 * @listener: The #MemoryListener.
987 * @section: The new #MemoryRegionSection.
988 * @addr: The starting address for the coalesced MMIO range.
989 * @len: The length of the coalesced MMIO range.
990 */
e6d34aee 991 void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
c2fc83e8 992 hwaddr addr, hwaddr len);
5d248213
PB
993
994 /**
995 * @coalesced_io_del:
996 *
997 * Called during an address space update transaction,
998 * for a section of the address space that has dropped a coalesced
999 * MMIO range since the last transaction.
1000 *
1001 * @listener: The #MemoryListener.
1002 * @section: The new #MemoryRegionSection.
1003 * @addr: The starting address for the coalesced MMIO range.
1004 * @len: The length of the coalesced MMIO range.
1005 */
e6d34aee 1006 void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
c2fc83e8 1007 hwaddr addr, hwaddr len);
5d248213
PB
1008 /**
1009 * @priority:
1010 *
1011 * Govern the order in which memory listeners are invoked. Lower priorities
1012 * are invoked earlier for "add" or "start" callbacks, and later for "delete"
1013 * or "stop" callbacks.
1014 */
c2fc83e8 1015 unsigned priority;
5d248213 1016
142518bd
PX
1017 /**
1018 * @name:
1019 *
1020 * Name of the listener. It can be used in contexts where we'd like to
1021 * identify one memory listener with the rest.
1022 */
1023 const char *name;
1024
5d248213 1025 /* private: */
d45fa784 1026 AddressSpace *address_space;
c2fc83e8 1027 QTAILQ_ENTRY(MemoryListener) link;
9a54635d 1028 QTAILQ_ENTRY(MemoryListener) link_as;
c2fc83e8
PB
1029};
1030
9ad2bbc1 1031/**
301302f0 1032 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
9ad2bbc1
AK
1033 */
1034struct AddressSpace {
08226b44 1035 /* private: */
374f2981 1036 struct rcu_head rcu;
7dca8043 1037 char *name;
9ad2bbc1 1038 MemoryRegion *root;
374f2981
PB
1039
1040 /* Accessed via RCU. */
9ad2bbc1 1041 struct FlatView *current_map;
374f2981 1042
9ad2bbc1
AK
1043 int ioeventfd_nb;
1044 struct MemoryRegionIoeventfd *ioeventfds;
eae3eb3e 1045 QTAILQ_HEAD(, MemoryListener) listeners;
0d673e36 1046 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
9ad2bbc1
AK
1047};
1048
785a507e
PB
1049typedef struct AddressSpaceDispatch AddressSpaceDispatch;
1050typedef struct FlatRange FlatRange;
1051
1052/* Flattened global view of current active memory hierarchy. Kept in sorted
1053 * order.
1054 */
1055struct FlatView {
1056 struct rcu_head rcu;
1057 unsigned ref;
1058 FlatRange *ranges;
1059 unsigned nr;
1060 unsigned nr_allocated;
1061 struct AddressSpaceDispatch *dispatch;
1062 MemoryRegion *root;
1063};
1064
1065static inline FlatView *address_space_to_flatview(AddressSpace *as)
1066{
d73415a3 1067 return qatomic_rcu_read(&as->current_map);
785a507e
PB
1068}
1069
a5e32ec1
PM
1070/**
1071 * typedef flatview_cb: callback for flatview_for_each_range()
1072 *
1073 * @start: start address of the range within the FlatView
1074 * @len: length of the range in bytes
1075 * @mr: MemoryRegion covering this range
b3566001 1076 * @offset_in_region: offset of the first byte of the range within @mr
a5e32ec1
PM
1077 * @opaque: data pointer passed to flatview_for_each_range()
1078 *
1079 * Returns: true to stop the iteration, false to keep going.
1080 */
d1e8cf77
PM
1081typedef bool (*flatview_cb)(Int128 start,
1082 Int128 len,
a5e32ec1 1083 const MemoryRegion *mr,
b3566001 1084 hwaddr offset_in_region,
a5e32ec1 1085 void *opaque);
fb5ef4ee 1086
a5e32ec1
PM
1087/**
1088 * flatview_for_each_range: Iterate through a FlatView
1089 * @fv: the FlatView to iterate through
1090 * @cb: function to call for each range
1091 * @opaque: opaque data pointer to pass to @cb
1092 *
1093 * A FlatView is made up of a list of non-overlapping ranges, each of
1094 * which is a slice of a MemoryRegion. This function iterates through
1095 * each range in @fv, calling @cb. The callback function can terminate
1096 * iteration early by returning 'true'.
1097 */
1098void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
16620684 1099
9366cf02
DDAG
1100static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
1101 MemoryRegionSection *b)
1102{
1103 return a->mr == b->mr &&
1104 a->fv == b->fv &&
1105 a->offset_within_region == b->offset_within_region &&
1106 a->offset_within_address_space == b->offset_within_address_space &&
1107 int128_eq(a->size, b->size) &&
1108 a->readonly == b->readonly &&
1109 a->nonvolatile == b->nonvolatile;
1110}
1111
22843838
DH
1112/**
1113 * memory_region_section_new_copy: Copy a memory region section
1114 *
1115 * Allocate memory for a new copy, copy the memory region section, and
1116 * properly take a reference on all relevant members.
1117 *
1118 * @s: the #MemoryRegionSection to copy
1119 */
1120MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
1121
1122/**
1123 * memory_region_section_new_copy: Free a copied memory region section
1124 *
1125 * Free a copy of a memory section created via memory_region_section_new_copy().
1126 * properly dropping references on all relevant members.
1127 *
1128 * @s: the #MemoryRegionSection to copy
1129 */
1130void memory_region_section_free_copy(MemoryRegionSection *s);
1131
093bc2cd
AK
1132/**
1133 * memory_region_init: Initialize a memory region
1134 *
69ddaf66 1135 * The region typically acts as a container for other memory regions. Use
093bc2cd
AK
1136 * memory_region_add_subregion() to add subregions.
1137 *
1138 * @mr: the #MemoryRegion to be initialized
2c9b15ca 1139 * @owner: the object that tracks the region's reference count
093bc2cd
AK
1140 * @name: used for debugging; not visible to the user or ABI
1141 * @size: size of the region; any subregions beyond this size will be clipped
1142 */
1143void memory_region_init(MemoryRegion *mr,
d32335e8 1144 Object *owner,
093bc2cd
AK
1145 const char *name,
1146 uint64_t size);
46637be2
PB
1147
1148/**
1149 * memory_region_ref: Add 1 to a memory region's reference count
1150 *
1151 * Whenever memory regions are accessed outside the BQL, they need to be
1152 * preserved against hot-unplug. MemoryRegions actually do not have their
1153 * own reference count; they piggyback on a QOM object, their "owner".
1154 * This function adds a reference to the owner.
1155 *
1156 * All MemoryRegions must have an owner if they can disappear, even if the
1157 * device they belong to operates exclusively under the BQL. This is because
1158 * the region could be returned at any time by memory_region_find, and this
1159 * is usually under guest control.
1160 *
1161 * @mr: the #MemoryRegion
1162 */
1163void memory_region_ref(MemoryRegion *mr);
1164
1165/**
1166 * memory_region_unref: Remove 1 to a memory region's reference count
1167 *
1168 * Whenever memory regions are accessed outside the BQL, they need to be
1169 * preserved against hot-unplug. MemoryRegions actually do not have their
1170 * own reference count; they piggyback on a QOM object, their "owner".
1171 * This function removes a reference to the owner and possibly destroys it.
1172 *
1173 * @mr: the #MemoryRegion
1174 */
1175void memory_region_unref(MemoryRegion *mr);
1176
093bc2cd
AK
1177/**
1178 * memory_region_init_io: Initialize an I/O memory region.
1179 *
69ddaf66 1180 * Accesses into the region will cause the callbacks in @ops to be called.
093bc2cd
AK
1181 * if @size is nonzero, subregions will be clipped to @size.
1182 *
1183 * @mr: the #MemoryRegion to be initialized.
2c9b15ca 1184 * @owner: the object that tracks the region's reference count
093bc2cd
AK
1185 * @ops: a structure containing read and write callbacks to be used when
1186 * I/O is performed on the region.
b6af0975 1187 * @opaque: passed to the read and write callbacks of the @ops structure.
093bc2cd
AK
1188 * @name: used for debugging; not visible to the user or ABI
1189 * @size: size of the region.
1190 */
1191void memory_region_init_io(MemoryRegion *mr,
d32335e8 1192 Object *owner,
093bc2cd
AK
1193 const MemoryRegionOps *ops,
1194 void *opaque,
1195 const char *name,
1196 uint64_t size);
1197
1198/**
1cfe48c1
PM
1199 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
1200 * into the region will modify memory
1201 * directly.
093bc2cd
AK
1202 *
1203 * @mr: the #MemoryRegion to be initialized.
2c9b15ca 1204 * @owner: the object that tracks the region's reference count
e8f5fe2d
DDAG
1205 * @name: Region name, becomes part of RAMBlock name used in migration stream
1206 * must be unique within any device
093bc2cd 1207 * @size: size of the region.
49946538 1208 * @errp: pointer to Error*, to store an error if it happens.
a5c0234b
PM
1209 *
1210 * Note that this function does not do anything to cause the data in the
1211 * RAM memory region to be migrated; that is the responsibility of the caller.
093bc2cd 1212 */
1cfe48c1 1213void memory_region_init_ram_nomigrate(MemoryRegion *mr,
d32335e8 1214 Object *owner,
1cfe48c1
PM
1215 const char *name,
1216 uint64_t size,
1217 Error **errp);
093bc2cd 1218
06329cce 1219/**
7f863cba
DH
1220 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
1221 * Accesses into the region will
1222 * modify memory directly.
06329cce
MA
1223 *
1224 * @mr: the #MemoryRegion to be initialized.
1225 * @owner: the object that tracks the region's reference count
1226 * @name: Region name, becomes part of RAMBlock name used in migration stream
1227 * must be unique within any device
1228 * @size: size of the region.
8dbe22c6 1229 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
06329cce
MA
1230 * @errp: pointer to Error*, to store an error if it happens.
1231 *
7f863cba
DH
1232 * Note that this function does not do anything to cause the data in the
1233 * RAM memory region to be migrated; that is the responsibility of the caller.
06329cce 1234 */
7f863cba
DH
1235void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1236 Object *owner,
1237 const char *name,
1238 uint64_t size,
1239 uint32_t ram_flags,
1240 Error **errp);
06329cce 1241
60786ef3
MT
1242/**
1243 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
1244 * RAM. Accesses into the region will
1245 * modify memory directly. Only an initial
1246 * portion of this RAM is actually used.
c7c0e724
DH
1247 * Changing the size while migrating
1248 * can result in the migration being
1249 * canceled.
60786ef3
MT
1250 *
1251 * @mr: the #MemoryRegion to be initialized.
1252 * @owner: the object that tracks the region's reference count
e8f5fe2d
DDAG
1253 * @name: Region name, becomes part of RAMBlock name used in migration stream
1254 * must be unique within any device
60786ef3
MT
1255 * @size: used size of the region.
1256 * @max_size: max size of the region.
1257 * @resized: callback to notify owner about used size change.
1258 * @errp: pointer to Error*, to store an error if it happens.
a5c0234b
PM
1259 *
1260 * Note that this function does not do anything to cause the data in the
1261 * RAM memory region to be migrated; that is the responsibility of the caller.
60786ef3
MT
1262 */
1263void memory_region_init_resizeable_ram(MemoryRegion *mr,
d32335e8 1264 Object *owner,
60786ef3
MT
1265 const char *name,
1266 uint64_t size,
1267 uint64_t max_size,
1268 void (*resized)(const char*,
1269 uint64_t length,
1270 void *host),
1271 Error **errp);
d5dbde46 1272#ifdef CONFIG_POSIX
cbfc0171 1273
0b183fc8
PB
1274/**
1275 * memory_region_init_ram_from_file: Initialize RAM memory region with a
1276 * mmap-ed backend.
1277 *
1278 * @mr: the #MemoryRegion to be initialized.
1279 * @owner: the object that tracks the region's reference count
e8f5fe2d
DDAG
1280 * @name: Region name, becomes part of RAMBlock name used in migration stream
1281 * must be unique within any device
0b183fc8 1282 * @size: size of the region.
98376843
HZ
1283 * @align: alignment of the region base address; if 0, the default alignment
1284 * (getpagesize()) will be used.
8dbe22c6
DH
1285 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
1286 * RAM_NORESERVE,
0b183fc8 1287 * @path: the path in which to allocate the RAM.
369d6dc4 1288 * @readonly: true to open @path for reading, false for read/write.
7f56e740 1289 * @errp: pointer to Error*, to store an error if it happens.
a5c0234b
PM
1290 *
1291 * Note that this function does not do anything to cause the data in the
1292 * RAM memory region to be migrated; that is the responsibility of the caller.
0b183fc8
PB
1293 */
1294void memory_region_init_ram_from_file(MemoryRegion *mr,
d32335e8 1295 Object *owner,
0b183fc8
PB
1296 const char *name,
1297 uint64_t size,
98376843 1298 uint64_t align,
cbfc0171 1299 uint32_t ram_flags,
7f56e740 1300 const char *path,
369d6dc4 1301 bool readonly,
7f56e740 1302 Error **errp);
fea617c5
MAL
1303
1304/**
1305 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
1306 * mmap-ed backend.
1307 *
1308 * @mr: the #MemoryRegion to be initialized.
1309 * @owner: the object that tracks the region's reference count
1310 * @name: the name of the region.
1311 * @size: size of the region.
8dbe22c6 1312 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
56918a12 1313 * RAM_NORESERVE, RAM_PROTECTED.
fea617c5 1314 * @fd: the fd to mmap.
44a4ff31 1315 * @offset: offset within the file referenced by fd
fea617c5 1316 * @errp: pointer to Error*, to store an error if it happens.
a5c0234b
PM
1317 *
1318 * Note that this function does not do anything to cause the data in the
1319 * RAM memory region to be migrated; that is the responsibility of the caller.
fea617c5
MAL
1320 */
1321void memory_region_init_ram_from_fd(MemoryRegion *mr,
d32335e8 1322 Object *owner,
fea617c5
MAL
1323 const char *name,
1324 uint64_t size,
d5015b80 1325 uint32_t ram_flags,
fea617c5 1326 int fd,
44a4ff31 1327 ram_addr_t offset,
fea617c5 1328 Error **errp);
0b183fc8
PB
1329#endif
1330
093bc2cd 1331/**
1a7e8cae
BZ
1332 * memory_region_init_ram_ptr: Initialize RAM memory region from a
1333 * user-provided pointer. Accesses into the
1334 * region will modify memory directly.
093bc2cd
AK
1335 *
1336 * @mr: the #MemoryRegion to be initialized.
2c9b15ca 1337 * @owner: the object that tracks the region's reference count
e8f5fe2d
DDAG
1338 * @name: Region name, becomes part of RAMBlock name used in migration stream
1339 * must be unique within any device
093bc2cd
AK
1340 * @size: size of the region.
1341 * @ptr: memory to be mapped; must contain at least @size bytes.
a5c0234b
PM
1342 *
1343 * Note that this function does not do anything to cause the data in the
1344 * RAM memory region to be migrated; that is the responsibility of the caller.
093bc2cd
AK
1345 */
1346void memory_region_init_ram_ptr(MemoryRegion *mr,
d32335e8 1347 Object *owner,
093bc2cd
AK
1348 const char *name,
1349 uint64_t size,
1350 void *ptr);
1351
21e00fa5
AW
1352/**
1353 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
1354 * a user-provided pointer.
1355 *
1356 * A RAM device represents a mapping to a physical device, such as to a PCI
1357 * MMIO BAR of an vfio-pci assigned device. The memory region may be mapped
1358 * into the VM address space and access to the region will modify memory
1359 * directly. However, the memory region should not be included in a memory
1360 * dump (device may not be enabled/mapped at the time of the dump), and
1361 * operations incompatible with manipulating MMIO should be avoided. Replaces
1362 * skip_dump flag.
1363 *
1364 * @mr: the #MemoryRegion to be initialized.
1365 * @owner: the object that tracks the region's reference count
1366 * @name: the name of the region.
1367 * @size: size of the region.
1368 * @ptr: memory to be mapped; must contain at least @size bytes.
a5c0234b
PM
1369 *
1370 * Note that this function does not do anything to cause the data in the
1371 * RAM memory region to be migrated; that is the responsibility of the caller.
1372 * (For RAM device memory regions, migrating the contents rarely makes sense.)
21e00fa5
AW
1373 */
1374void memory_region_init_ram_device_ptr(MemoryRegion *mr,
d32335e8 1375 Object *owner,
21e00fa5
AW
1376 const char *name,
1377 uint64_t size,
1378 void *ptr);
1379
093bc2cd
AK
1380/**
1381 * memory_region_init_alias: Initialize a memory region that aliases all or a
1382 * part of another memory region.
1383 *
1384 * @mr: the #MemoryRegion to be initialized.
2c9b15ca 1385 * @owner: the object that tracks the region's reference count
093bc2cd
AK
1386 * @name: used for debugging; not visible to the user or ABI
1387 * @orig: the region to be referenced; @mr will be equivalent to
1388 * @orig between @offset and @offset + @size - 1.
1389 * @offset: start of the section in @orig to be referenced.
1390 * @size: size of the region.
1391 */
1392void memory_region_init_alias(MemoryRegion *mr,
d32335e8 1393 Object *owner,
093bc2cd
AK
1394 const char *name,
1395 MemoryRegion *orig,
a8170e5e 1396 hwaddr offset,
093bc2cd 1397 uint64_t size);
d0a9b5bc 1398
a1777f7f 1399/**
b59821a9 1400 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
a1777f7f 1401 *
b59821a9 1402 * This has the same effect as calling memory_region_init_ram_nomigrate()
a1777f7f
PM
1403 * and then marking the resulting region read-only with
1404 * memory_region_set_readonly().
1405 *
b59821a9
PM
1406 * Note that this function does not do anything to cause the data in the
1407 * RAM side of the memory region to be migrated; that is the responsibility
1408 * of the caller.
1409 *
a1777f7f
PM
1410 * @mr: the #MemoryRegion to be initialized.
1411 * @owner: the object that tracks the region's reference count
e8f5fe2d
DDAG
1412 * @name: Region name, becomes part of RAMBlock name used in migration stream
1413 * must be unique within any device
a1777f7f
PM
1414 * @size: size of the region.
1415 * @errp: pointer to Error*, to store an error if it happens.
1416 */
b59821a9 1417void memory_region_init_rom_nomigrate(MemoryRegion *mr,
d32335e8 1418 Object *owner,
b59821a9
PM
1419 const char *name,
1420 uint64_t size,
1421 Error **errp);
a1777f7f 1422
d0a9b5bc 1423/**
b59821a9
PM
1424 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
1425 * Writes are handled via callbacks.
1426 *
1427 * Note that this function does not do anything to cause the data in the
1428 * RAM side of the memory region to be migrated; that is the responsibility
1429 * of the caller.
d0a9b5bc
AK
1430 *
1431 * @mr: the #MemoryRegion to be initialized.
2c9b15ca 1432 * @owner: the object that tracks the region's reference count
39e0b03d 1433 * @ops: callbacks for write access handling (must not be NULL).
57914ecb 1434 * @opaque: passed to the read and write callbacks of the @ops structure.
e8f5fe2d
DDAG
1435 * @name: Region name, becomes part of RAMBlock name used in migration stream
1436 * must be unique within any device
d0a9b5bc 1437 * @size: size of the region.
33e0eb52 1438 * @errp: pointer to Error*, to store an error if it happens.
d0a9b5bc 1439 */
b59821a9 1440void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
d32335e8 1441 Object *owner,
b59821a9
PM
1442 const MemoryRegionOps *ops,
1443 void *opaque,
1444 const char *name,
1445 uint64_t size,
1446 Error **errp);
d0a9b5bc 1447
30951157 1448/**
1221a474
AK
1449 * memory_region_init_iommu: Initialize a memory region of a custom type
1450 * that translates addresses
30951157
AK
1451 *
1452 * An IOMMU region translates addresses and forwards accesses to a target
1453 * memory region.
1454 *
2ce931d0
PM
1455 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1456 * @_iommu_mr should be a pointer to enough memory for an instance of
1457 * that subclass, @instance_size is the size of that subclass, and
1458 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1459 * instance of the subclass, and its methods will then be called to handle
1460 * accesses to the memory region. See the documentation of
1461 * #IOMMUMemoryRegionClass for further details.
1462 *
1221a474
AK
1463 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1464 * @instance_size: the IOMMUMemoryRegion subclass instance size
57914ecb 1465 * @mrtypename: the type name of the #IOMMUMemoryRegion
2c9b15ca 1466 * @owner: the object that tracks the region's reference count
30951157
AK
1467 * @name: used for debugging; not visible to the user or ABI
1468 * @size: size of the region.
1469 */
1221a474
AK
1470void memory_region_init_iommu(void *_iommu_mr,
1471 size_t instance_size,
1472 const char *mrtypename,
1473 Object *owner,
30951157
AK
1474 const char *name,
1475 uint64_t size);
1476
b08199c6
PM
1477/**
1478 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
1479 * region will modify memory directly.
1480 *
1481 * @mr: the #MemoryRegion to be initialized
1482 * @owner: the object that tracks the region's reference count (must be
1483 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1484 * @name: name of the memory region
1485 * @size: size of the region in bytes
1486 * @errp: pointer to Error*, to store an error if it happens.
1487 *
1488 * This function allocates RAM for a board model or device, and
1489 * arranges for it to be migrated (by calling vmstate_register_ram()
1490 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1491 * @owner is NULL).
1492 *
1493 * TODO: Currently we restrict @owner to being either NULL (for
1494 * global RAM regions with no owner) or devices, so that we can
1495 * give the RAM block a unique name for migration purposes.
1496 * We should lift this restriction and allow arbitrary Objects.
1497 * If you pass a non-NULL non-device @owner then we will assert.
1498 */
1499void memory_region_init_ram(MemoryRegion *mr,
d32335e8 1500 Object *owner,
b08199c6
PM
1501 const char *name,
1502 uint64_t size,
1503 Error **errp);
1504
1505/**
1506 * memory_region_init_rom: Initialize a ROM memory region.
1507 *
1508 * This has the same effect as calling memory_region_init_ram()
1509 * and then marking the resulting region read-only with
1510 * memory_region_set_readonly(). This includes arranging for the
1511 * contents to be migrated.
1512 *
1513 * TODO: Currently we restrict @owner to being either NULL (for
1514 * global RAM regions with no owner) or devices, so that we can
1515 * give the RAM block a unique name for migration purposes.
1516 * We should lift this restriction and allow arbitrary Objects.
1517 * If you pass a non-NULL non-device @owner then we will assert.
1518 *
1519 * @mr: the #MemoryRegion to be initialized.
1520 * @owner: the object that tracks the region's reference count
1521 * @name: Region name, becomes part of RAMBlock name used in migration stream
1522 * must be unique within any device
1523 * @size: size of the region.
1524 * @errp: pointer to Error*, to store an error if it happens.
1525 */
1526void memory_region_init_rom(MemoryRegion *mr,
d32335e8 1527 Object *owner,
b08199c6
PM
1528 const char *name,
1529 uint64_t size,
1530 Error **errp);
1531
1532/**
1533 * memory_region_init_rom_device: Initialize a ROM memory region.
1534 * Writes are handled via callbacks.
1535 *
1536 * This function initializes a memory region backed by RAM for reads
1537 * and callbacks for writes, and arranges for the RAM backing to
1538 * be migrated (by calling vmstate_register_ram()
1539 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1540 * @owner is NULL).
1541 *
1542 * TODO: Currently we restrict @owner to being either NULL (for
1543 * global RAM regions with no owner) or devices, so that we can
1544 * give the RAM block a unique name for migration purposes.
1545 * We should lift this restriction and allow arbitrary Objects.
1546 * If you pass a non-NULL non-device @owner then we will assert.
1547 *
1548 * @mr: the #MemoryRegion to be initialized.
1549 * @owner: the object that tracks the region's reference count
1550 * @ops: callbacks for write access handling (must not be NULL).
5d248213 1551 * @opaque: passed to the read and write callbacks of the @ops structure.
b08199c6
PM
1552 * @name: Region name, becomes part of RAMBlock name used in migration stream
1553 * must be unique within any device
1554 * @size: size of the region.
1555 * @errp: pointer to Error*, to store an error if it happens.
1556 */
1557void memory_region_init_rom_device(MemoryRegion *mr,
d32335e8 1558 Object *owner,
b08199c6
PM
1559 const MemoryRegionOps *ops,
1560 void *opaque,
1561 const char *name,
1562 uint64_t size,
1563 Error **errp);
1564
1565
803c0816
PB
1566/**
1567 * memory_region_owner: get a memory region's owner.
1568 *
1569 * @mr: the memory region being queried.
1570 */
d32335e8 1571Object *memory_region_owner(MemoryRegion *mr);
803c0816 1572
093bc2cd
AK
1573/**
1574 * memory_region_size: get a memory region's size.
1575 *
1576 * @mr: the memory region being queried.
1577 */
1578uint64_t memory_region_size(MemoryRegion *mr);
1579
8ea9252a
AK
1580/**
1581 * memory_region_is_ram: check whether a memory region is random access
1582 *
847b31f0 1583 * Returns %true if a memory region is random access.
8ea9252a
AK
1584 *
1585 * @mr: the memory region being queried
1586 */
1619d1fe
PB
1587static inline bool memory_region_is_ram(MemoryRegion *mr)
1588{
1589 return mr->ram;
1590}
8ea9252a 1591
e4dc3f59 1592/**
21e00fa5 1593 * memory_region_is_ram_device: check whether a memory region is a ram device
e4dc3f59 1594 *
847b31f0 1595 * Returns %true if a memory region is a device backed ram region
e4dc3f59
ND
1596 *
1597 * @mr: the memory region being queried
1598 */
21e00fa5 1599bool memory_region_is_ram_device(MemoryRegion *mr);
e4dc3f59 1600
fd062573 1601/**
5f9a5ea1 1602 * memory_region_is_romd: check whether a memory region is in ROMD mode
fd062573 1603 *
5f9a5ea1 1604 * Returns %true if a memory region is a ROM device and currently set to allow
fd062573
BS
1605 * direct reads.
1606 *
1607 * @mr: the memory region being queried
1608 */
1609static inline bool memory_region_is_romd(MemoryRegion *mr)
1610{
5f9a5ea1 1611 return mr->rom_device && mr->romd_mode;
fd062573
BS
1612}
1613
56918a12
SC
1614/**
1615 * memory_region_is_protected: check whether a memory region is protected
1616 *
1617 * Returns %true if a memory region is protected RAM and cannot be accessed
1618 * via standard mechanisms, e.g. DMA.
1619 *
1620 * @mr: the memory region being queried
1621 */
1622bool memory_region_is_protected(MemoryRegion *mr);
1623
30951157 1624/**
3df9d748 1625 * memory_region_get_iommu: check whether a memory region is an iommu
30951157 1626 *
3df9d748
AK
1627 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1628 * otherwise NULL.
30951157
AK
1629 *
1630 * @mr: the memory region being queried
1631 */
3df9d748 1632static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1619d1fe 1633{
12d37882 1634 if (mr->alias) {
3df9d748
AK
1635 return memory_region_get_iommu(mr->alias);
1636 }
1637 if (mr->is_iommu) {
1638 return (IOMMUMemoryRegion *) mr;
12d37882 1639 }
3df9d748 1640 return NULL;
1619d1fe
PB
1641}
1642
1221a474
AK
1643/**
1644 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1645 * if an iommu or NULL if not
1646 *
57914ecb
JZ
1647 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1648 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
1221a474 1649 *
5d248213 1650 * @iommu_mr: the memory region being queried
1221a474
AK
1651 */
1652static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1653 IOMMUMemoryRegion *iommu_mr)
1654{
1655 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1656}
1657
3df9d748 1658#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
30951157 1659
f682e9c2
AK
1660/**
1661 * memory_region_iommu_get_min_page_size: get minimum supported page size
1662 * for an iommu
1663 *
1664 * Returns minimum supported page size for an iommu.
1665 *
3df9d748 1666 * @iommu_mr: the memory region being queried
f682e9c2 1667 */
3df9d748 1668uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
f682e9c2 1669
06866575
DG
1670/**
1671 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1672 *
cdb30812
PX
1673 * Note: for any IOMMU implementation, an in-place mapping change
1674 * should be notified with an UNMAP followed by a MAP.
1675 *
3df9d748 1676 * @iommu_mr: the memory region that was changed
cb1efcf4 1677 * @iommu_idx: the IOMMU index for the translation table which has changed
5039caf3
EP
1678 * @event: TLB event with the new entry in the IOMMU translation table.
1679 * The entry replaces all old entries for the same virtual I/O address
1680 * range.
06866575 1681 */
3df9d748 1682void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
cb1efcf4 1683 int iommu_idx,
5039caf3 1684 IOMMUTLBEvent event);
06866575 1685
bd2bfa4c 1686/**
3b5ebf85 1687 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
bd2bfa4c
PX
1688 * entry to a single notifier
1689 *
1690 * This works just like memory_region_notify_iommu(), but it only
1691 * notifies a specific notifier, not all of them.
1692 *
1693 * @notifier: the notifier to be notified
5039caf3
EP
1694 * @event: TLB event with the new entry in the IOMMU translation table.
1695 * The entry replaces all old entries for the same virtual I/O address
1696 * range.
bd2bfa4c 1697 */
3b5ebf85 1698void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
5039caf3 1699 IOMMUTLBEvent *event);
bd2bfa4c 1700
06866575
DG
1701/**
1702 * memory_region_register_iommu_notifier: register a notifier for changes to
1703 * IOMMU translation entries.
1704 *
549d4005
EA
1705 * Returns 0 on success, or a negative errno otherwise. In particular,
1706 * -EINVAL indicates that at least one of the attributes of the notifier
1707 * is not supported (flag/range) by the IOMMU memory region. In case of error
1708 * the error object must be created.
1709 *
06866575 1710 * @mr: the memory region to observe
cdb30812
PX
1711 * @n: the IOMMUNotifier to be added; the notify callback receives a
1712 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1713 * ceases to be valid on exit from the notifier.
5d248213 1714 * @errp: pointer to Error*, to store an error if it happens.
06866575 1715 */
549d4005
EA
1716int memory_region_register_iommu_notifier(MemoryRegion *mr,
1717 IOMMUNotifier *n, Error **errp);
06866575 1718
a788f227
DG
1719/**
1720 * memory_region_iommu_replay: replay existing IOMMU translations to
f682e9c2
AK
1721 * a notifier with the minimum page granularity returned by
1722 * mr->iommu_ops->get_page_size().
a788f227 1723 *
2ce931d0
PM
1724 * Note: this is not related to record-and-replay functionality.
1725 *
3df9d748 1726 * @iommu_mr: the memory region to observe
a788f227 1727 * @n: the notifier to which to replay iommu mappings
a788f227 1728 */
3df9d748 1729void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
a788f227 1730
06866575
DG
1731/**
1732 * memory_region_unregister_iommu_notifier: unregister a notifier for
1733 * changes to IOMMU translation entries.
1734 *
d22d8956
AK
1735 * @mr: the memory region which was observed and for which notity_stopped()
1736 * needs to be called
06866575
DG
1737 * @n: the notifier to be removed.
1738 */
cdb30812
PX
1739void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1740 IOMMUNotifier *n);
06866575 1741
f1334de6
AK
1742/**
1743 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1744 * defined on the IOMMU.
1745 *
2ce931d0
PM
1746 * Returns 0 on success, or a negative errno otherwise. In particular,
1747 * -EINVAL indicates that the IOMMU does not support the requested
1748 * attribute.
f1334de6
AK
1749 *
1750 * @iommu_mr: the memory region
1751 * @attr: the requested attribute
1752 * @data: a pointer to the requested attribute data
1753 */
1754int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1755 enum IOMMUMemoryRegionAttr attr,
1756 void *data);
1757
21f40209
PM
1758/**
1759 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1760 * use for translations with the given memory transaction attributes.
1761 *
1762 * @iommu_mr: the memory region
1763 * @attrs: the memory transaction attributes
1764 */
1765int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1766 MemTxAttrs attrs);
1767
1768/**
1769 * memory_region_iommu_num_indexes: return the total number of IOMMU
1770 * indexes that this IOMMU supports.
1771 *
1772 * @iommu_mr: the memory region
1773 */
1774int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1775
457f8cbb
BB
1776/**
1777 * memory_region_iommu_set_page_size_mask: set the supported page
1778 * sizes for a given IOMMU memory region
1779 *
1780 * @iommu_mr: IOMMU memory region
1781 * @page_size_mask: supported page size mask
1782 * @errp: pointer to Error*, to store an error if it happens.
1783 */
1784int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1785 uint64_t page_size_mask,
1786 Error **errp);
1787
8991c79b
AK
1788/**
1789 * memory_region_name: get a memory region's name
1790 *
1791 * Returns the string that was used to initialize the memory region.
1792 *
1793 * @mr: the memory region being queried
1794 */
5d546d4b 1795const char *memory_region_name(const MemoryRegion *mr);
8991c79b 1796
55043ba3
AK
1797/**
1798 * memory_region_is_logging: return whether a memory region is logging writes
1799 *
2d1a35be 1800 * Returns %true if the memory region is logging writes for the given client
55043ba3
AK
1801 *
1802 * @mr: the memory region being queried
2d1a35be 1803 * @client: the client being queried
55043ba3 1804 */
2d1a35be
PB
1805bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1806
1807/**
1808 * memory_region_get_dirty_log_mask: return the clients for which a
1809 * memory region is logging writes.
1810 *
677e7805
PB
1811 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1812 * are the bit indices.
2d1a35be
PB
1813 *
1814 * @mr: the memory region being queried
1815 */
1816uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
55043ba3 1817
ce7923da
AK
1818/**
1819 * memory_region_is_rom: check whether a memory region is ROM
1820 *
847b31f0 1821 * Returns %true if a memory region is read-only memory.
ce7923da
AK
1822 *
1823 * @mr: the memory region being queried
1824 */
1619d1fe
PB
1825static inline bool memory_region_is_rom(MemoryRegion *mr)
1826{
1827 return mr->ram && mr->readonly;
1828}
1829
c26763f8
MAL
1830/**
1831 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1832 *
1833 * Returns %true is a memory region is non-volatile memory.
1834 *
1835 * @mr: the memory region being queried
1836 */
1837static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1838{
1839 return mr->nonvolatile;
1840}
ce7923da 1841
a35ba7be
PB
1842/**
1843 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1844 *
1845 * Returns a file descriptor backing a file-based RAM memory region,
1846 * or -1 if the region is not a file-based RAM memory region.
1847 *
1848 * @mr: the RAM or alias memory region being queried.
1849 */
1850int memory_region_get_fd(MemoryRegion *mr);
1851
07bdaa41
PB
1852/**
1853 * memory_region_from_host: Convert a pointer into a RAM memory region
1854 * and an offset within it.
1855 *
1856 * Given a host pointer inside a RAM memory region (created with
1857 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1858 * the MemoryRegion and the offset within it.
1859 *
1860 * Use with care; by the time this function returns, the returned pointer is
1861 * not protected by RCU anymore. If the caller is not within an RCU critical
1862 * section and does not hold the iothread lock, it must have other means of
1863 * protecting the pointer, such as a reference to the region that includes
1864 * the incoming ram_addr_t.
1865 *
57914ecb
JZ
1866 * @ptr: the host pointer to be converted
1867 * @offset: the offset within memory region
07bdaa41
PB
1868 */
1869MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1870
093bc2cd
AK
1871/**
1872 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1873 *
1874 * Returns a host pointer to a RAM memory region (created with
49b24afc
PB
1875 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1876 *
1877 * Use with care; by the time this function returns, the returned pointer is
1878 * not protected by RCU anymore. If the caller is not within an RCU critical
1879 * section and does not hold the iothread lock, it must have other means of
1880 * protecting the pointer, such as a reference to the region that includes
1881 * the incoming ram_addr_t.
093bc2cd
AK
1882 *
1883 * @mr: the memory region being queried.
1884 */
1885void *memory_region_get_ram_ptr(MemoryRegion *mr);
1886
37d7c084
PB
1887/* memory_region_ram_resize: Resize a RAM region.
1888 *
c7c0e724
DH
1889 * Resizing RAM while migrating can result in the migration being canceled.
1890 * Care has to be taken if the guest might have already detected the memory.
37d7c084
PB
1891 *
1892 * @mr: a memory region created with @memory_region_init_resizeable_ram.
1893 * @newsize: the new size the region
1894 * @errp: pointer to Error*, to store an error if it happens.
1895 */
1896void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1897 Error **errp);
9ecc996a
PMD
1898
1899/**
1900 * memory_region_msync: Synchronize selected address range of
1901 * a memory mapped region
1902 *
1903 * @mr: the memory region to be msync
1904 * @addr: the initial address of the range to be sync
1905 * @size: the size of the range to be sync
1906 */
1907void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
1908
61c490e2 1909/**
9ecc996a 1910 * memory_region_writeback: Trigger cache writeback for
5d248213 1911 * selected address range
61c490e2 1912 *
5d248213
PB
1913 * @mr: the memory region to be updated
1914 * @addr: the initial address of the range to be written back
1915 * @size: the size of the range to be written back
61c490e2 1916 */
4dfe59d1 1917void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
37d7c084 1918
093bc2cd
AK
1919/**
1920 * memory_region_set_log: Turn dirty logging on or off for a region.
1921 *
1922 * Turns dirty logging on or off for a specified client (display, migration).
1923 * Only meaningful for RAM regions.
1924 *
1925 * @mr: the memory region being updated.
1926 * @log: whether dirty logging is to be enabled or disabled.
dbddac6d 1927 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
093bc2cd
AK
1928 */
1929void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1930
093bc2cd 1931/**
fd4aa979 1932 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
093bc2cd 1933 *
fd4aa979
BS
1934 * Marks a range of bytes as dirty, after it has been dirtied outside
1935 * guest code.
093bc2cd 1936 *
fd4aa979 1937 * @mr: the memory region being dirtied.
093bc2cd 1938 * @addr: the address (relative to the start of the region) being dirtied.
fd4aa979 1939 * @size: size of the range being dirtied.
093bc2cd 1940 */
a8170e5e
AK
1941void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1942 hwaddr size);
093bc2cd 1943
077874e0
PX
1944/**
1945 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
1946 *
1947 * This function is called when the caller wants to clear the remote
1948 * dirty bitmap of a memory range within the memory region. This can
1949 * be used by e.g. KVM to manually clear dirty log when
1950 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared support by the host
1951 * kernel.
1952 *
1953 * @mr: the memory region to clear the dirty log upon
1954 * @start: start address offset within the memory region
1955 * @len: length of the memory region to clear dirty bitmap
1956 */
1957void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
1958 hwaddr len);
1959
8deaf12c
GH
1960/**
1961 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1962 * bitmap and clear it.
1963 *
1964 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1965 * returns the snapshot. The snapshot can then be used to query dirty
77302fb5
PB
1966 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
1967 * querying the same page multiple times, which is especially useful for
1968 * display updates where the scanlines often are not page aligned.
8deaf12c
GH
1969 *
1970 * The dirty bitmap region which gets copyed into the snapshot (and
1971 * cleared afterwards) can be larger than requested. The boundaries
1972 * are rounded up/down so complete bitmap longs (covering 64 pages on
1973 * 64bit hosts) can be copied over into the bitmap snapshot. Which
1974 * isn't a problem for display updates as the extra pages are outside
1975 * the visible area, and in case the visible area changes a full
1976 * display redraw is due anyway. Should other use cases for this
1977 * function emerge we might have to revisit this implementation
1978 * detail.
1979 *
1980 * Use g_free to release DirtyBitmapSnapshot.
1981 *
1982 * @mr: the memory region being queried.
1983 * @addr: the address (relative to the start of the region) being queried.
1984 * @size: the size of the range being queried.
1985 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1986 */
1987DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1988 hwaddr addr,
1989 hwaddr size,
1990 unsigned client);
1991
1992/**
1993 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1994 * in the specified dirty bitmap snapshot.
1995 *
1996 * @mr: the memory region being queried.
1997 * @snap: the dirty bitmap snapshot
1998 * @addr: the address (relative to the start of the region) being queried.
1999 * @size: the size of the range being queried.
2000 */
2001bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2002 DirtyBitmapSnapshot *snap,
2003 hwaddr addr, hwaddr size);
2004
093bc2cd
AK
2005/**
2006 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2007 * client.
2008 *
2009 * Marks a range of pages as no longer dirty.
2010 *
2011 * @mr: the region being updated.
2012 * @addr: the start of the subrange being cleaned.
2013 * @size: the size of the subrange being cleaned.
2014 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2015 * %DIRTY_MEMORY_VGA.
2016 */
a8170e5e
AK
2017void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2018 hwaddr size, unsigned client);
093bc2cd 2019
047be4ed
SH
2020/**
2021 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2022 * TBs (for self-modifying code).
2023 *
2024 * The MemoryRegionOps->write() callback of a ROM device must use this function
2025 * to mark byte ranges that have been modified internally, such as by directly
2026 * accessing the memory returned by memory_region_get_ram_ptr().
2027 *
2028 * This function marks the range dirty and invalidates TBs so that TCG can
2029 * detect self-modifying code.
2030 *
2031 * @mr: the region being flushed.
2032 * @addr: the start, relative to the start of the region, of the range being
2033 * flushed.
2034 * @size: the size, in bytes, of the range being flushed.
2035 */
2036void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
2037
093bc2cd
AK
2038/**
2039 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2040 *
2041 * Allows a memory region to be marked as read-only (turning it into a ROM).
2042 * only useful on RAM regions.
2043 *
2044 * @mr: the region being updated.
2045 * @readonly: whether rhe region is to be ROM or RAM.
2046 */
2047void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2048
c26763f8
MAL
2049/**
2050 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2051 *
2052 * Allows a memory region to be marked as non-volatile.
2053 * only useful on RAM regions.
2054 *
2055 * @mr: the region being updated.
2056 * @nonvolatile: whether rhe region is to be non-volatile.
2057 */
2058void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2059
d0a9b5bc 2060/**
5f9a5ea1 2061 * memory_region_rom_device_set_romd: enable/disable ROMD mode
d0a9b5bc
AK
2062 *
2063 * Allows a ROM device (initialized with memory_region_init_rom_device() to
5f9a5ea1
JK
2064 * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
2065 * device is mapped to guest memory and satisfies read access directly.
2066 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2067 * Writes are always handled by the #MemoryRegion.write function.
d0a9b5bc
AK
2068 *
2069 * @mr: the memory region to be updated
5f9a5ea1 2070 * @romd_mode: %true to put the region into ROMD mode
d0a9b5bc 2071 */
5f9a5ea1 2072void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
d0a9b5bc 2073
093bc2cd
AK
2074/**
2075 * memory_region_set_coalescing: Enable memory coalescing for the region.
2076 *
2077 * Enabled writes to a region to be queued for later processing. MMIO ->write
2078 * callbacks may be delayed until a non-coalesced MMIO is issued.
2079 * Only useful for IO regions. Roughly similar to write-combining hardware.
2080 *
2081 * @mr: the memory region to be write coalesced
2082 */
2083void memory_region_set_coalescing(MemoryRegion *mr);
2084
2085/**
2086 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2087 * a region.
2088 *
2089 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2090 * Multiple calls can be issued coalesced disjoint ranges.
2091 *
2092 * @mr: the memory region to be updated.
2093 * @offset: the start of the range within the region to be coalesced.
2094 * @size: the size of the subrange to be coalesced.
2095 */
2096void memory_region_add_coalescing(MemoryRegion *mr,
a8170e5e 2097 hwaddr offset,
093bc2cd
AK
2098 uint64_t size);
2099
2100/**
2101 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2102 *
2103 * Disables any coalescing caused by memory_region_set_coalescing() or
2104 * memory_region_add_coalescing(). Roughly equivalent to uncacheble memory
2105 * hardware.
2106 *
2107 * @mr: the memory region to be updated.
2108 */
2109void memory_region_clear_coalescing(MemoryRegion *mr);
2110
d410515e
JK
2111/**
2112 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2113 * accesses.
2114 *
2115 * Ensure that pending coalesced MMIO request are flushed before the memory
2116 * region is accessed. This property is automatically enabled for all regions
2117 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2118 *
2119 * @mr: the memory region to be updated.
2120 */
2121void memory_region_set_flush_coalesced(MemoryRegion *mr);
2122
2123/**
2124 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2125 * accesses.
2126 *
2127 * Clear the automatic coalesced MMIO flushing enabled via
2128 * memory_region_set_flush_coalesced. Note that this service has no effect on
2129 * memory regions that have MMIO coalescing enabled for themselves. For them,
2130 * automatic flushing will stop once coalescing is disabled.
2131 *
2132 * @mr: the memory region to be updated.
2133 */
2134void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2135
3e9d69e7
AK
2136/**
2137 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2138 * is written to a location.
2139 *
2140 * Marks a word in an IO region (initialized with memory_region_init_io())
2141 * as a trigger for an eventfd event. The I/O callback will not be called.
69ddaf66 2142 * The caller must be prepared to handle failure (that is, take the required
3e9d69e7
AK
2143 * action if the callback _is_ called).
2144 *
2145 * @mr: the memory region being updated.
2146 * @addr: the address within @mr that is to be monitored
2147 * @size: the size of the access to trigger the eventfd
2148 * @match_data: whether to match against @data, instead of just @addr
2149 * @data: the data to match against the guest write
57914ecb 2150 * @e: event notifier to be triggered when @addr, @size, and @data all match.
3e9d69e7
AK
2151 **/
2152void memory_region_add_eventfd(MemoryRegion *mr,
a8170e5e 2153 hwaddr addr,
3e9d69e7
AK
2154 unsigned size,
2155 bool match_data,
2156 uint64_t data,
753d5e14 2157 EventNotifier *e);
3e9d69e7
AK
2158
2159/**
69ddaf66 2160 * memory_region_del_eventfd: Cancel an eventfd.
3e9d69e7 2161 *
69ddaf66
ASRJ
2162 * Cancels an eventfd trigger requested by a previous
2163 * memory_region_add_eventfd() call.
3e9d69e7
AK
2164 *
2165 * @mr: the memory region being updated.
2166 * @addr: the address within @mr that is to be monitored
2167 * @size: the size of the access to trigger the eventfd
2168 * @match_data: whether to match against @data, instead of just @addr
2169 * @data: the data to match against the guest write
57914ecb 2170 * @e: event notifier to be triggered when @addr, @size, and @data all match.
3e9d69e7
AK
2171 */
2172void memory_region_del_eventfd(MemoryRegion *mr,
a8170e5e 2173 hwaddr addr,
3e9d69e7
AK
2174 unsigned size,
2175 bool match_data,
2176 uint64_t data,
753d5e14
PB
2177 EventNotifier *e);
2178
093bc2cd 2179/**
69ddaf66 2180 * memory_region_add_subregion: Add a subregion to a container.
093bc2cd 2181 *
69ddaf66 2182 * Adds a subregion at @offset. The subregion may not overlap with other
093bc2cd
AK
2183 * subregions (except for those explicitly marked as overlapping). A region
2184 * may only be added once as a subregion (unless removed with
2185 * memory_region_del_subregion()); use memory_region_init_alias() if you
2186 * want a region to be a subregion in multiple locations.
2187 *
2188 * @mr: the region to contain the new subregion; must be a container
2189 * initialized with memory_region_init().
2190 * @offset: the offset relative to @mr where @subregion is added.
2191 * @subregion: the subregion to be added.
2192 */
2193void memory_region_add_subregion(MemoryRegion *mr,
a8170e5e 2194 hwaddr offset,
093bc2cd
AK
2195 MemoryRegion *subregion);
2196/**
1a7e8cae
BZ
2197 * memory_region_add_subregion_overlap: Add a subregion to a container
2198 * with overlap.
093bc2cd 2199 *
69ddaf66 2200 * Adds a subregion at @offset. The subregion may overlap with other
093bc2cd
AK
2201 * subregions. Conflicts are resolved by having a higher @priority hide a
2202 * lower @priority. Subregions without priority are taken as @priority 0.
2203 * A region may only be added once as a subregion (unless removed with
2204 * memory_region_del_subregion()); use memory_region_init_alias() if you
2205 * want a region to be a subregion in multiple locations.
2206 *
2207 * @mr: the region to contain the new subregion; must be a container
2208 * initialized with memory_region_init().
2209 * @offset: the offset relative to @mr where @subregion is added.
2210 * @subregion: the subregion to be added.
2211 * @priority: used for resolving overlaps; highest priority wins.
2212 */
2213void memory_region_add_subregion_overlap(MemoryRegion *mr,
a8170e5e 2214 hwaddr offset,
093bc2cd 2215 MemoryRegion *subregion,
a1ff8ae0 2216 int priority);
e34911c4
AK
2217
2218/**
2219 * memory_region_get_ram_addr: Get the ram address associated with a memory
2220 * region
5d248213
PB
2221 *
2222 * @mr: the region to be queried
e34911c4 2223 */
7ebb2745 2224ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
e34911c4 2225
a2b257d6 2226uint64_t memory_region_get_alignment(const MemoryRegion *mr);
093bc2cd
AK
2227/**
2228 * memory_region_del_subregion: Remove a subregion.
2229 *
2230 * Removes a subregion from its container.
2231 *
2232 * @mr: the container to be updated.
2233 * @subregion: the region being removed; must be a current subregion of @mr.
2234 */
2235void memory_region_del_subregion(MemoryRegion *mr,
2236 MemoryRegion *subregion);
2237
6bba19ba
AK
2238/*
2239 * memory_region_set_enabled: dynamically enable or disable a region
2240 *
2241 * Enables or disables a memory region. A disabled memory region
2242 * ignores all accesses to itself and its subregions. It does not
2243 * obscure sibling subregions with lower priority - it simply behaves as
2244 * if it was removed from the hierarchy.
2245 *
2246 * Regions default to being enabled.
2247 *
2248 * @mr: the region to be updated
2249 * @enabled: whether to enable or disable the region
2250 */
2251void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2252
2282e1af
AK
2253/*
2254 * memory_region_set_address: dynamically update the address of a region
2255 *
feca4ac1 2256 * Dynamically updates the address of a region, relative to its container.
2282e1af
AK
2257 * May be used on regions are currently part of a memory hierarchy.
2258 *
2259 * @mr: the region to be updated
feca4ac1 2260 * @addr: new address, relative to container region
2282e1af 2261 */
a8170e5e 2262void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2282e1af 2263
e7af4c67
MT
2264/*
2265 * memory_region_set_size: dynamically update the size of a region.
2266 *
2267 * Dynamically updates the size of a region.
2268 *
2269 * @mr: the region to be updated
2270 * @size: used size of the region.
2271 */
2272void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2273
4703359e
AK
2274/*
2275 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2276 *
2277 * Dynamically updates the offset into the target region that an alias points
2278 * to, as if the fourth argument to memory_region_init_alias() has changed.
2279 *
2280 * @mr: the #MemoryRegion to be updated; should be an alias.
2281 * @offset: the new offset into the target memory region
2282 */
2283void memory_region_set_alias_offset(MemoryRegion *mr,
a8170e5e 2284 hwaddr offset);
4703359e 2285
3ce10901 2286/**
feca4ac1
PB
2287 * memory_region_present: checks if an address relative to a @container
2288 * translates into #MemoryRegion within @container
3ce10901 2289 *
feca4ac1 2290 * Answer whether a #MemoryRegion within @container covers the address
3ce10901
PB
2291 * @addr.
2292 *
feca4ac1
PB
2293 * @container: a #MemoryRegion within which @addr is a relative address
2294 * @addr: the area within @container to be searched
3ce10901 2295 */
feca4ac1 2296bool memory_region_present(MemoryRegion *container, hwaddr addr);
3ce10901 2297
eed2bacf
IM
2298/**
2299 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
455faf03
DH
2300 * into another memory region, which does not necessarily imply that it is
2301 * mapped into an address space.
eed2bacf
IM
2302 *
2303 * @mr: a #MemoryRegion which should be checked if it's mapped
2304 */
2305bool memory_region_is_mapped(MemoryRegion *mr);
2306
8947d7fc
DH
2307/**
2308 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2309 * #MemoryRegion
2310 *
2311 * The #RamDiscardManager cannot change while a memory region is mapped.
2312 *
2313 * @mr: the #MemoryRegion
2314 */
2315RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2316
2317/**
2318 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2319 * #RamDiscardManager assigned
2320 *
2321 * @mr: the #MemoryRegion
2322 */
2323static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2324{
2325 return !!memory_region_get_ram_discard_manager(mr);
2326}
2327
2328/**
2329 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2330 * #MemoryRegion
2331 *
2332 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2333 * that does not cover RAM, or a #MemoryRegion that already has a
2334 * #RamDiscardManager assigned.
2335 *
2336 * @mr: the #MemoryRegion
2337 * @rdm: #RamDiscardManager to set
2338 */
2339void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2340 RamDiscardManager *rdm);
2341
e2177955 2342/**
73034e9e
PB
2343 * memory_region_find: translate an address/size relative to a
2344 * MemoryRegion into a #MemoryRegionSection.
e2177955 2345 *
73034e9e
PB
2346 * Locates the first #MemoryRegion within @mr that overlaps the range
2347 * given by @addr and @size.
e2177955
AK
2348 *
2349 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2350 * It will have the following characteristics:
08226b44
PB
2351 * - @size = 0 iff no overlap was found
2352 * - @mr is non-%NULL iff an overlap was found
e2177955 2353 *
73034e9e
PB
2354 * Remember that in the return value the @offset_within_region is
2355 * relative to the returned region (in the .@mr field), not to the
2356 * @mr argument.
2357 *
2358 * Similarly, the .@offset_within_address_space is relative to the
2359 * address space that contains both regions, the passed and the
2360 * returned one. However, in the special case where the @mr argument
feca4ac1 2361 * has no container (and thus is the root of the address space), the
73034e9e 2362 * following will hold:
08226b44
PB
2363 * - @offset_within_address_space >= @addr
2364 * - @offset_within_address_space + .@size <= @addr + @size
73034e9e
PB
2365 *
2366 * @mr: a MemoryRegion within which @addr is a relative address
2367 * @addr: start of the area within @as to be searched
e2177955
AK
2368 * @size: size of the area to be searched
2369 */
73034e9e 2370MemoryRegionSection memory_region_find(MemoryRegion *mr,
a8170e5e 2371 hwaddr addr, uint64_t size);
e2177955 2372
86e775c6 2373/**
9c1f8f44 2374 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
86e775c6 2375 *
9c1f8f44 2376 * Synchronizes the dirty page log for all address spaces.
86e775c6 2377 */
9c1f8f44 2378void memory_global_dirty_log_sync(void);
9458a9a1
PB
2379
2380/**
2381 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2382 *
2383 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2384 * This function must be called after the dirty log bitmap is cleared, and
2385 * before dirty guest memory pages are read. If you are using
2386 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2387 * care of doing this.
2388 */
2389void memory_global_after_dirty_log_sync(void);
86e775c6 2390
69ddaf66
ASRJ
2391/**
2392 * memory_region_transaction_begin: Start a transaction.
2393 *
2394 * During a transaction, changes will be accumulated and made visible
dabdf394 2395 * only when the transaction ends (is committed).
4ef4db86
AK
2396 */
2397void memory_region_transaction_begin(void);
69ddaf66
ASRJ
2398
2399/**
2400 * memory_region_transaction_commit: Commit a transaction and make changes
2401 * visible to the guest.
4ef4db86
AK
2402 */
2403void memory_region_transaction_commit(void);
2404
7664e80c
AK
2405/**
2406 * memory_listener_register: register callbacks to be called when memory
2407 * sections are mapped or unmapped into an address
2408 * space
2409 *
2410 * @listener: an object containing the callbacks to be called
7376e582 2411 * @filter: if non-%NULL, only regions in this address space will be observed
7664e80c 2412 */
f6790af6 2413void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
7664e80c
AK
2414
2415/**
2416 * memory_listener_unregister: undo the effect of memory_listener_register()
2417 *
2418 * @listener: an object containing the callbacks to be removed
2419 */
2420void memory_listener_unregister(MemoryListener *listener);
2421
2422/**
2423 * memory_global_dirty_log_start: begin dirty logging for all regions
63b41db4
HH
2424 *
2425 * @flags: purpose of starting dirty log, migration or dirty rate
7664e80c 2426 */
63b41db4 2427void memory_global_dirty_log_start(unsigned int flags);
7664e80c
AK
2428
2429/**
1a7e8cae 2430 * memory_global_dirty_log_stop: end dirty logging for all regions
63b41db4
HH
2431 *
2432 * @flags: purpose of stopping dirty log, migration or dirty rate
7664e80c 2433 */
63b41db4 2434void memory_global_dirty_log_stop(unsigned int flags);
7664e80c 2435
2261d393 2436void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
314e2987 2437
3b643495
PM
2438/**
2439 * memory_region_dispatch_read: perform a read directly to the specified
2440 * MemoryRegion.
2441 *
2442 * @mr: #MemoryRegion to access
2443 * @addr: address within that region
2444 * @pval: pointer to uint64_t which the data is written to
e67c9046 2445 * @op: size, sign, and endianness of the memory operation
3b643495
PM
2446 * @attrs: memory transaction attributes to use for the access
2447 */
2448MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2449 hwaddr addr,
2450 uint64_t *pval,
e67c9046 2451 MemOp op,
3b643495
PM
2452 MemTxAttrs attrs);
2453/**
2454 * memory_region_dispatch_write: perform a write directly to the specified
2455 * MemoryRegion.
2456 *
2457 * @mr: #MemoryRegion to access
2458 * @addr: address within that region
2459 * @data: data to write
e67c9046 2460 * @op: size, sign, and endianness of the memory operation
3b643495
PM
2461 * @attrs: memory transaction attributes to use for the access
2462 */
2463MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2464 hwaddr addr,
2465 uint64_t data,
e67c9046 2466 MemOp op,
3b643495
PM
2467 MemTxAttrs attrs);
2468
9ad2bbc1
AK
2469/**
2470 * address_space_init: initializes an address space
2471 *
2472 * @as: an uninitialized #AddressSpace
67cc32eb 2473 * @root: a #MemoryRegion that routes addresses for the address space
7dca8043
AK
2474 * @name: an address space name. The name is only used for debugging
2475 * output.
9ad2bbc1 2476 */
7dca8043 2477void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
9ad2bbc1 2478
83f3c251
AK
2479/**
2480 * address_space_destroy: destroy an address space
2481 *
2482 * Releases all resources associated with an address space. After an address space
2483 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2484 * as well.
2485 *
2486 * @as: address space to be destroyed
2487 */
2488void address_space_destroy(AddressSpace *as);
2489
a2166410
GK
2490/**
2491 * address_space_remove_listeners: unregister all listeners of an address space
2492 *
2493 * Removes all callbacks previously registered with memory_listener_register()
2494 * for @as.
2495 *
2496 * @as: an initialized #AddressSpace
2497 */
2498void address_space_remove_listeners(AddressSpace *as);
2499
ac1970fb
AK
2500/**
2501 * address_space_rw: read from or write to an address space.
2502 *
5c9eb028
PM
2503 * Return a MemTxResult indicating whether the operation succeeded
2504 * or failed (eg unassigned memory, device rejected the transaction,
2505 * IOMMU fault).
fd8aaa76 2506 *
ac1970fb
AK
2507 * @as: #AddressSpace to be accessed
2508 * @addr: address within that address space
5c9eb028 2509 * @attrs: memory transaction attributes
ac1970fb 2510 * @buf: buffer with the data transferred
57914ecb 2511 * @len: the number of bytes to read or write
ac1970fb
AK
2512 * @is_write: indicates the transfer direction
2513 */
5c9eb028 2514MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
daa3dda4 2515 MemTxAttrs attrs, void *buf,
0c249ff7 2516 hwaddr len, bool is_write);
ac1970fb
AK
2517
2518/**
2519 * address_space_write: write to address space.
2520 *
5c9eb028
PM
2521 * Return a MemTxResult indicating whether the operation succeeded
2522 * or failed (eg unassigned memory, device rejected the transaction,
2523 * IOMMU fault).
fd8aaa76 2524 *
ac1970fb
AK
2525 * @as: #AddressSpace to be accessed
2526 * @addr: address within that address space
5c9eb028 2527 * @attrs: memory transaction attributes
ac1970fb 2528 * @buf: buffer with the data transferred
57914ecb 2529 * @len: the number of bytes to write
ac1970fb 2530 */
5c9eb028
PM
2531MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2532 MemTxAttrs attrs,
daa3dda4 2533 const void *buf, hwaddr len);
ac1970fb 2534
3c8133f9
PM
2535/**
2536 * address_space_write_rom: write to address space, including ROM.
2537 *
2538 * This function writes to the specified address space, but will
2539 * write data to both ROM and RAM. This is used for non-guest
2540 * writes like writes from the gdb debug stub or initial loading
2541 * of ROM contents.
2542 *
2543 * Note that portions of the write which attempt to write data to
2544 * a device will be silently ignored -- only real RAM and ROM will
2545 * be written to.
2546 *
2547 * Return a MemTxResult indicating whether the operation succeeded
2548 * or failed (eg unassigned memory, device rejected the transaction,
2549 * IOMMU fault).
2550 *
2551 * @as: #AddressSpace to be accessed
2552 * @addr: address within that address space
2553 * @attrs: memory transaction attributes
2554 * @buf: buffer with the data transferred
2555 * @len: the number of bytes to write
2556 */
2557MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2558 MemTxAttrs attrs,
daa3dda4 2559 const void *buf, hwaddr len);
3c8133f9 2560
3cc8f884 2561/* address_space_ld*: load from an address space
50013115
PM
2562 * address_space_st*: store to an address space
2563 *
2564 * These functions perform a load or store of the byte, word,
2565 * longword or quad to the specified address within the AddressSpace.
2566 * The _le suffixed functions treat the data as little endian;
2567 * _be indicates big endian; no suffix indicates "same endianness
2568 * as guest CPU".
2569 *
2570 * The "guest CPU endianness" accessors are deprecated for use outside
2571 * target-* code; devices should be CPU-agnostic and use either the LE
2572 * or the BE accessors.
2573 *
2574 * @as #AddressSpace to be accessed
2575 * @addr: address within that address space
2576 * @val: data value, for stores
2577 * @attrs: memory transaction attributes
2578 * @result: location to write the success/failure of the transaction;
2579 * if NULL, this information is discarded
2580 */
4269c82b
PB
2581
2582#define SUFFIX
2583#define ARG1 as
2584#define ARG1_DECL AddressSpace *as
0979ed01 2585#include "exec/memory_ldst.h.inc"
4269c82b
PB
2586
2587#define SUFFIX
2588#define ARG1 as
2589#define ARG1_DECL AddressSpace *as
0979ed01 2590#include "exec/memory_ldst_phys.h.inc"
0ce265ff 2591
1f4e496e 2592struct MemoryRegionCache {
48564041 2593 void *ptr;
1f4e496e 2594 hwaddr xlat;
1f4e496e 2595 hwaddr len;
48564041
PB
2596 FlatView *fv;
2597 MemoryRegionSection mrs;
2598 bool is_write;
1f4e496e
PB
2599};
2600
48564041
PB
2601#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
2602
5eba0404 2603
4269c82b
PB
2604/* address_space_ld*_cached: load from a cached #MemoryRegion
2605 * address_space_st*_cached: store into a cached #MemoryRegion
2606 *
2607 * These functions perform a load or store of the byte, word,
2608 * longword or quad to the specified address. The address is
2609 * a physical address in the AddressSpace, but it must lie within
2610 * a #MemoryRegion that was mapped with address_space_cache_init.
2611 *
2612 * The _le suffixed functions treat the data as little endian;
2613 * _be indicates big endian; no suffix indicates "same endianness
2614 * as guest CPU".
2615 *
2616 * The "guest CPU endianness" accessors are deprecated for use outside
2617 * target-* code; devices should be CPU-agnostic and use either the LE
2618 * or the BE accessors.
2619 *
2620 * @cache: previously initialized #MemoryRegionCache to be accessed
2621 * @addr: address within the address space
2622 * @val: data value, for stores
2623 * @attrs: memory transaction attributes
2624 * @result: location to write the success/failure of the transaction;
2625 * if NULL, this information is discarded
2626 */
2627
48564041 2628#define SUFFIX _cached_slow
4269c82b
PB
2629#define ARG1 cache
2630#define ARG1_DECL MemoryRegionCache *cache
0979ed01 2631#include "exec/memory_ldst.h.inc"
4269c82b 2632
48564041
PB
2633/* Inline fast path for direct RAM access. */
2634static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2635 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2636{
2637 assert(addr < cache->len);
2638 if (likely(cache->ptr)) {
2639 return ldub_p(cache->ptr + addr);
2640 } else {
2641 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2642 }
2643}
2644
2645static inline void address_space_stb_cached(MemoryRegionCache *cache,
4121f4b3 2646 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
48564041
PB
2647{
2648 assert(addr < cache->len);
2649 if (likely(cache->ptr)) {
2650 stb_p(cache->ptr + addr, val);
2651 } else {
2652 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2653 }
2654}
2655
2656#define ENDIANNESS _le
0979ed01 2657#include "exec/memory_ldst_cached.h.inc"
48564041
PB
2658
2659#define ENDIANNESS _be
0979ed01 2660#include "exec/memory_ldst_cached.h.inc"
48564041 2661
4269c82b
PB
2662#define SUFFIX _cached
2663#define ARG1 cache
2664#define ARG1_DECL MemoryRegionCache *cache
0979ed01 2665#include "exec/memory_ldst_phys.h.inc"
4269c82b 2666
1f4e496e
PB
2667/* address_space_cache_init: prepare for repeated access to a physical
2668 * memory region
2669 *
2670 * @cache: #MemoryRegionCache to be filled
2671 * @as: #AddressSpace to be accessed
2672 * @addr: address within that address space
2673 * @len: length of buffer
2674 * @is_write: indicates the transfer direction
2675 *
2676 * Will only work with RAM, and may map a subset of the requested range by
2677 * returning a value that is less than @len. On failure, return a negative
2678 * errno value.
2679 *
2680 * Because it only works with RAM, this function can be used for
2681 * read-modify-write operations. In this case, is_write should be %true.
2682 *
2683 * Note that addresses passed to the address_space_*_cached functions
2684 * are relative to @addr.
2685 */
2686int64_t address_space_cache_init(MemoryRegionCache *cache,
2687 AddressSpace *as,
2688 hwaddr addr,
2689 hwaddr len,
2690 bool is_write);
2691
2692/**
2693 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2694 *
2695 * @cache: The #MemoryRegionCache to operate on.
2696 * @addr: The first physical address that was written, relative to the
2697 * address that was passed to @address_space_cache_init.
2698 * @access_len: The number of bytes that were written starting at @addr.
2699 */
2700void address_space_cache_invalidate(MemoryRegionCache *cache,
2701 hwaddr addr,
2702 hwaddr access_len);
2703
2704/**
2705 * address_space_cache_destroy: free a #MemoryRegionCache
2706 *
2707 * @cache: The #MemoryRegionCache whose memory should be released.
2708 */
2709void address_space_cache_destroy(MemoryRegionCache *cache);
2710
052c8fa9
JW
2711/* address_space_get_iotlb_entry: translate an address into an IOTLB
2712 * entry. Should be called from an RCU critical section.
2713 */
2714IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
7446eb07 2715 bool is_write, MemTxAttrs attrs);
1f4e496e 2716
149f54b5 2717/* address_space_translate: translate an address range into an address space
41063e1e
PB
2718 * into a MemoryRegion and an address range into that section. Should be
2719 * called from an RCU critical section, to avoid that the last reference
2720 * to the returned region disappears after address_space_translate returns.
149f54b5 2721 *
57914ecb 2722 * @fv: #FlatView to be accessed
149f54b5
PB
2723 * @addr: address within that address space
2724 * @xlat: pointer to address within the returned memory region section's
2725 * #MemoryRegion.
2726 * @len: pointer to length
2727 * @is_write: indicates the transfer direction
bc6b1cec 2728 * @attrs: memory attributes
149f54b5 2729 */
16620684
AK
2730MemoryRegion *flatview_translate(FlatView *fv,
2731 hwaddr addr, hwaddr *xlat,
efa99a2f
PM
2732 hwaddr *len, bool is_write,
2733 MemTxAttrs attrs);
16620684
AK
2734
2735static inline MemoryRegion *address_space_translate(AddressSpace *as,
2736 hwaddr addr, hwaddr *xlat,
bc6b1cec
PM
2737 hwaddr *len, bool is_write,
2738 MemTxAttrs attrs)
16620684
AK
2739{
2740 return flatview_translate(address_space_to_flatview(as),
efa99a2f 2741 addr, xlat, len, is_write, attrs);
16620684 2742}
149f54b5 2743
51644ab7
PB
2744/* address_space_access_valid: check for validity of accessing an address
2745 * space range
2746 *
30951157
AK
2747 * Check whether memory is assigned to the given address space range, and
2748 * access is permitted by any IOMMU regions that are active for the address
2749 * space.
51644ab7
PB
2750 *
2751 * For now, addr and len should be aligned to a page size. This limitation
2752 * will be lifted in the future.
2753 *
2754 * @as: #AddressSpace to be accessed
2755 * @addr: address within that address space
2756 * @len: length of the area to be checked
2757 * @is_write: indicates the transfer direction
fddffa42 2758 * @attrs: memory attributes
51644ab7 2759 */
0c249ff7 2760bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
fddffa42 2761 bool is_write, MemTxAttrs attrs);
51644ab7 2762
ac1970fb
AK
2763/* address_space_map: map a physical memory region into a host virtual address
2764 *
2765 * May map a subset of the requested range, given by and returned in @plen.
77f55eac
PP
2766 * May return %NULL and set *@plen to zero(0), if resources needed to perform
2767 * the mapping are exhausted.
ac1970fb
AK
2768 * Use only for reads OR writes - not for read-modify-write operations.
2769 * Use cpu_register_map_client() to know when retrying the map operation is
2770 * likely to succeed.
2771 *
2772 * @as: #AddressSpace to be accessed
2773 * @addr: address within that address space
2774 * @plen: pointer to length of buffer; updated on return
2775 * @is_write: indicates the transfer direction
f26404fb 2776 * @attrs: memory attributes
ac1970fb 2777 */
a8170e5e 2778void *address_space_map(AddressSpace *as, hwaddr addr,
f26404fb 2779 hwaddr *plen, bool is_write, MemTxAttrs attrs);
ac1970fb
AK
2780
2781/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2782 *
2783 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2784 * the amount of memory that was actually read or written by the caller.
2785 *
2786 * @as: #AddressSpace used
57914ecb 2787 * @buffer: host pointer as returned by address_space_map()
ac1970fb
AK
2788 * @len: buffer length as returned by address_space_map()
2789 * @access_len: amount of data actually transferred
2790 * @is_write: indicates the transfer direction
2791 */
a8170e5e 2792void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
ae5883ab 2793 bool is_write, hwaddr access_len);
ac1970fb
AK
2794
2795
a203ac70 2796/* Internal functions, part of the implementation of address_space_read. */
b2a44fca 2797MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
daa3dda4 2798 MemTxAttrs attrs, void *buf, hwaddr len);
16620684 2799MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
a152be43 2800 MemTxAttrs attrs, void *buf,
0c249ff7 2801 hwaddr len, hwaddr addr1, hwaddr l,
16620684 2802 MemoryRegion *mr);
0878d0e1 2803void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
3cc8f884 2804
48564041
PB
2805/* Internal functions, part of the implementation of address_space_read_cached
2806 * and address_space_write_cached. */
38df19fa
PMD
2807MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2808 hwaddr addr, void *buf, hwaddr len);
2809MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
2810 hwaddr addr, const void *buf,
2811 hwaddr len);
48564041 2812
3cc8f884
PB
2813static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2814{
2815 if (is_write) {
d489ae4a
AD
2816 return memory_region_is_ram(mr) && !mr->readonly &&
2817 !mr->rom_device && !memory_region_is_ram_device(mr);
3cc8f884 2818 } else {
4a2e242b
AW
2819 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2820 memory_region_is_romd(mr);
3cc8f884 2821 }
3cc8f884
PB
2822}
2823
2824/**
2825 * address_space_read: read from an address space.
2826 *
2827 * Return a MemTxResult indicating whether the operation succeeded
2828 * or failed (eg unassigned memory, device rejected the transaction,
b2a44fca 2829 * IOMMU fault). Called within RCU critical section.
3cc8f884 2830 *
b2a44fca 2831 * @as: #AddressSpace to be accessed
3cc8f884
PB
2832 * @addr: address within that address space
2833 * @attrs: memory transaction attributes
2834 * @buf: buffer with the data transferred
5d248213 2835 * @len: length of the data transferred
3cc8f884
PB
2836 */
2837static inline __attribute__((__always_inline__))
b2a44fca 2838MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
daa3dda4 2839 MemTxAttrs attrs, void *buf,
0c249ff7 2840 hwaddr len)
3cc8f884
PB
2841{
2842 MemTxResult result = MEMTX_OK;
2843 hwaddr l, addr1;
2844 void *ptr;
2845 MemoryRegion *mr;
b2a44fca 2846 FlatView *fv;
3cc8f884
PB
2847
2848 if (__builtin_constant_p(len)) {
2849 if (len) {
293a733d 2850 RCU_READ_LOCK_GUARD();
b2a44fca 2851 fv = address_space_to_flatview(as);
3cc8f884 2852 l = len;
efa99a2f 2853 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
3cc8f884 2854 if (len == l && memory_access_is_direct(mr, false)) {
0878d0e1 2855 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3cc8f884
PB
2856 memcpy(buf, ptr, len);
2857 } else {
16620684
AK
2858 result = flatview_read_continue(fv, addr, attrs, buf, len,
2859 addr1, l, mr);
3cc8f884 2860 }
3cc8f884
PB
2861 }
2862 } else {
b2a44fca 2863 result = address_space_read_full(as, addr, attrs, buf, len);
3cc8f884
PB
2864 }
2865 return result;
2866}
a203ac70 2867
1f4e496e
PB
2868/**
2869 * address_space_read_cached: read from a cached RAM region
2870 *
2871 * @cache: Cached region to be addressed
2872 * @addr: address relative to the base of the RAM region
2873 * @buf: buffer with the data transferred
2874 * @len: length of the data transferred
2875 */
38df19fa 2876static inline MemTxResult
1f4e496e 2877address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
0c249ff7 2878 void *buf, hwaddr len)
1f4e496e
PB
2879{
2880 assert(addr < cache->len && len <= cache->len - addr);
fc1c8344 2881 fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
48564041
PB
2882 if (likely(cache->ptr)) {
2883 memcpy(buf, cache->ptr + addr, len);
38df19fa 2884 return MEMTX_OK;
48564041 2885 } else {
38df19fa 2886 return address_space_read_cached_slow(cache, addr, buf, len);
48564041 2887 }
1f4e496e
PB
2888}
2889
2890/**
2891 * address_space_write_cached: write to a cached RAM region
2892 *
2893 * @cache: Cached region to be addressed
2894 * @addr: address relative to the base of the RAM region
2895 * @buf: buffer with the data transferred
2896 * @len: length of the data transferred
2897 */
38df19fa 2898static inline MemTxResult
1f4e496e 2899address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
daa3dda4 2900 const void *buf, hwaddr len)
1f4e496e
PB
2901{
2902 assert(addr < cache->len && len <= cache->len - addr);
48564041
PB
2903 if (likely(cache->ptr)) {
2904 memcpy(cache->ptr + addr, buf, len);
38df19fa 2905 return MEMTX_OK;
48564041 2906 } else {
38df19fa 2907 return address_space_write_cached_slow(cache, addr, buf, len);
48564041 2908 }
1f4e496e
PB
2909}
2910
75f01c68
PMD
2911/**
2912 * address_space_set: Fill address space with a constant byte.
2913 *
2914 * Return a MemTxResult indicating whether the operation succeeded
2915 * or failed (eg unassigned memory, device rejected the transaction,
2916 * IOMMU fault).
2917 *
2918 * @as: #AddressSpace to be accessed
2919 * @addr: address within that address space
2920 * @c: constant byte to fill the memory
2921 * @len: the number of bytes to fill with the constant byte
2922 * @attrs: memory transaction attributes
2923 */
2924MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
2925 uint8_t c, hwaddr len, MemTxAttrs attrs);
2926
7a3df11c 2927#ifdef NEED_CPU_H
d5d680ca 2928/* enum device_endian to MemOp. */
7a3df11c
PB
2929static inline MemOp devend_memop(enum device_endian end)
2930{
2931 QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2932 DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2933
ee3eb3a7 2934#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
7a3df11c
PB
2935 /* Swap if non-host endianness or native (target) endianness */
2936 return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2937#else
2938 const int non_host_endianness =
2939 DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2940
2941 /* In this case, native (target) endianness needs no swap. */
2942 return (end == non_host_endianness) ? MO_BSWAP : 0;
2943#endif
2944}
2945#endif
d5d680ca 2946
d24f31db
DH
2947/*
2948 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
2949 * to manage the actual amount of memory consumed by the VM (then, the memory
2950 * provided by RAM blocks might be bigger than the desired memory consumption).
2951 * This *must* be set if:
2952 * - Discarding parts of a RAM blocks does not result in the change being
2953 * reflected in the VM and the pages getting freed.
2954 * - All memory in RAM blocks is pinned or duplicated, invaldiating any previous
2955 * discards blindly.
2956 * - Discarding parts of a RAM blocks will result in integrity issues (e.g.,
2957 * encrypted VMs).
2958 * Technologies that only temporarily pin the current working set of a
2959 * driver are fine, because we don't expect such pages to be discarded
2960 * (esp. based on guest action like balloon inflation).
2961 *
2962 * This is *not* to be used to protect from concurrent discards (esp.,
2963 * postcopy).
2964 *
2965 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
2966 * discards to work reliably is active.
2967 */
2968int ram_block_discard_disable(bool state);
2969
7e6d32eb
DH
2970/*
2971 * See ram_block_discard_disable(): only disable uncoordinated discards,
2972 * keeping coordinated discards (via the RamDiscardManager) enabled.
2973 */
2974int ram_block_uncoordinated_discard_disable(bool state);
2975
d24f31db
DH
2976/*
2977 * Inhibit technologies that disable discarding of pages in RAM blocks.
2978 *
2979 * Returns 0 if successful. Returns -EBUSY if discards are already set to
2980 * broken.
2981 */
2982int ram_block_discard_require(bool state);
2983
2984/*
7e6d32eb
DH
2985 * See ram_block_discard_require(): only inhibit technologies that disable
2986 * uncoordinated discarding of pages in RAM blocks, allowing co-existance with
2987 * technologies that only inhibit uncoordinated discards (via the
2988 * RamDiscardManager).
2989 */
2990int ram_block_coordinated_discard_require(bool state);
2991
2992/*
2993 * Test if any discarding of memory in ram blocks is disabled.
d24f31db
DH
2994 */
2995bool ram_block_discard_is_disabled(void);
2996
2997/*
7e6d32eb 2998 * Test if any discarding of memory in ram blocks is required to work reliably.
d24f31db
DH
2999 */
3000bool ram_block_discard_is_required(void);
3001
093bc2cd
AK
3002#endif
3003
3004#endif